Github user andrewor14 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/8161#discussion_r39004505
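
    For context, every entry in this file is a MiMa (Migration Manager)
    exclusion rule that suppresses one specific binary-compatibility report.
    As a minimal sketch of the shape of such a rule (the names below are
    illustrative, not taken from this diff):

        import com.typesafe.tools.mima.core._
        import com.typesafe.tools.mima.core.ProblemFilters._

        // Ask MiMa not to report that this (hypothetical) method went away
        // between the old and new artifacts being compared.
        val rule = ProblemFilters.exclude[MissingMethodProblem](
          "org.example.internal.Helper.compute")

    excludePackage and MimaBuild.excludeSparkPackage apply the same idea at
    package granularity.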
  
    --- Diff: project/MimaExcludes.scala ---
    @@ -32,635 +32,638 @@ import com.typesafe.tools.mima.core.ProblemFilters._
      * MimaBuild.excludeSparkClass("graphx.util.collection.GraphXPrimitiveKeyOpenHashMap")
      */
     object MimaExcludes {
    -    def excludes(version: String) =
    -      version match {
    -        case v if v.startsWith("1.5") =>
    -          Seq(
    -            MimaBuild.excludeSparkPackage("deploy"),
    -            MimaBuild.excludeSparkPackage("network"),
    -            // These are needed if checking against the sbt build, since they are part of
    -            // the maven-generated artifacts in 1.3.
    -            excludePackage("org.spark-project.jetty"),
    -            MimaBuild.excludeSparkPackage("unused"),
    -            // JavaRDDLike is not meant to be extended by user programs
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.java.JavaRDDLike.partitioner"),
    -            // Modification of private static method
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem](
    -              "org.apache.spark.streaming.kafka.KafkaUtils.org$apache$spark$streaming$kafka$KafkaUtils$$leadersForRanges"),
    -            // Mima false positive (was a private[spark] class)
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.util.collection.PairIterator"),
    -            // Removing a testing method from a private class
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.streaming.kafka.KafkaTestUtils.waitUntilLeaderOffset"),
    -            // While private, MiMa is still not happy about the changes
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.ml.regression.LeastSquaresAggregator.this"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.ml.regression.LeastSquaresCostFun.this"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.ml.classification.LogisticCostFun.this"),
    -            // SQL execution is considered private.
    -            excludePackage("org.apache.spark.sql.execution"),
    -            // The old JSON RDD is removed in favor of streaming Jackson
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.json.JsonRDD$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.json.JsonRDD"),
    -            // local function inside a method
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.sql.SQLContext.org$apache$spark$sql$SQLContext$$needsConversion$1"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.sql.UDFRegistration.org$apache$spark$sql$UDFRegistration$$builder$24")
    -          ) ++ Seq(
    -            // SPARK-8479 Add numNonzeros and numActives to Matrix.
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Matrix.numNonzeros"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Matrix.numActives")
    -          ) ++ Seq(
    -            // SPARK-8914 Remove RDDApi
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.RDDApi")
    -          ) ++ Seq(
    -            // SPARK-7292 Provide operator to truncate lineage cheaply
    -            ProblemFilters.exclude[AbstractClassProblem](
    -              "org.apache.spark.rdd.RDDCheckpointData"),
    -            ProblemFilters.exclude[AbstractClassProblem](
    -              "org.apache.spark.rdd.CheckpointRDD")
    -          ) ++ Seq(
    -            // SPARK-8701 Add input metadata in the batch page.
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.streaming.scheduler.InputInfo$"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.streaming.scheduler.InputInfo")
    -          ) ++ Seq(
    -            // SPARK-6797 Support YARN modes for SparkR
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.r.PairwiseRRDD.this"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.r.RRDD.createRWorker"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.r.RRDD.this"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.r.StringRRDD.this"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.r.BaseRRDD.this")
    -          ) ++ Seq(
    -            // SPARK-7422 add argmax for sparse vectors
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Vector.argmax")
    -          ) ++ Seq(
    -            // SPARK-8906 Move all internal data source classes into execution.datasources
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.ResolvedDataSource"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PreInsertCastAndRename$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTableUsingAsSelect$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.InsertIntoDataSource$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.SqlNewHadoopPartition"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PartitioningUtils$PartitionValues$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DefaultWriterContainer"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PartitioningUtils$PartitionValues"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.RefreshTable$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTempTableUsing$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PartitionSpec"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DynamicPartitionWriterContainer"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTableUsingAsSelect"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.SqlNewHadoopRDD$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DescribeCommand$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PartitioningUtils$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.SqlNewHadoopRDD"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PreInsertCastAndRename"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.Partition$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.LogicalRelation$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PartitioningUtils"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.LogicalRelation"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.Partition"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.BaseWriterContainer"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PreWriteCheck"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTableUsing"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.RefreshTable"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.SqlNewHadoopRDD$NewHadoopMapPartitionsWithSplitRDD"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DataSourceStrategy$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTempTableUsing"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTempTableUsingAsSelect$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTempTableUsingAsSelect"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTableUsing$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.ResolvedDataSource$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PreWriteCheck$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.InsertIntoDataSource"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.InsertIntoHadoopFsRelation"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DDLParser"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CaseInsensitiveMap"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.InsertIntoHadoopFsRelation$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DataSourceStrategy"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.SqlNewHadoopRDD$NewHadoopMapPartitionsWithSplitRDD$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PartitionSpec$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DescribeCommand"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DDLException"),
    -            // SPARK-9763 Minimize exposure of internal SQL classes
    -            excludePackage("org.apache.spark.sql.parquet"),
    -            excludePackage("org.apache.spark.sql.json"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRDD$DecimalConversion$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCPartition"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JdbcUtils$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRDD$DecimalConversion"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCPartitioningInfo$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCPartition$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.package"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRDD$JDBCConversion"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRDD$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.package$DriverWrapper"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRDD"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCPartitioningInfo"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JdbcUtils"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.DefaultSource"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRelation$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.package$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRelation")
    -          ) ++ Seq(
    -            // SPARK-4751 Dynamic allocation for standalone mode
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.SparkContext.supportDynamicAllocation")
    -          ) ++ Seq(
    -            // SPARK-9580: Remove SQL test singletons
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.test.LocalSQLContext$SQLSession"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.test.LocalSQLContext"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.test.TestSQLContext"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.test.TestSQLContext$")
    -          ) ++ Seq(
    -            // SPARK-9704 Made ProbabilisticClassifier, Identifiable, VectorUDT public APIs
    -            ProblemFilters.exclude[IncompatibleResultTypeProblem](
    -              "org.apache.spark.mllib.linalg.VectorUDT.serialize")
    -          )
    +  def excludes(version: String) = version match {
    +    case v if v.startsWith("1.6") =>
    +      Seq(
    +        MimaBuild.excludeSparkPackage("network")
    +        )
    +    case v if v.startsWith("1.5") =>
    +      Seq(
    +        MimaBuild.excludeSparkPackage("network"),
    +        MimaBuild.excludeSparkPackage("deploy"),
    +        // These are needed if checking against the sbt build, since they are part of
    +        // the maven-generated artifacts in 1.3.
    +        excludePackage("org.spark-project.jetty"),
    +        MimaBuild.excludeSparkPackage("unused"),
    +        // JavaRDDLike is not meant to be extended by user programs
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.api.java.JavaRDDLike.partitioner"),
    +        // Modification of private static method
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem](
    +          "org.apache.spark.streaming.kafka.KafkaUtils.org$apache$spark$streaming$kafka$KafkaUtils$$leadersForRanges"),
    +        // Mima false positive (was a private[spark] class)
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.util.collection.PairIterator"),
    +        // Removing a testing method from a private class
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.streaming.kafka.KafkaTestUtils.waitUntilLeaderOffset"),
    +        // While private, MiMa is still not happy about the changes
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.ml.regression.LeastSquaresAggregator.this"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.ml.regression.LeastSquaresCostFun.this"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.ml.classification.LogisticCostFun.this"),
    +        // SQL execution is considered private.
    +        excludePackage("org.apache.spark.sql.execution"),
    +        // The old JSON RDD is removed in favor of streaming Jackson
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.json.JsonRDD$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.json.JsonRDD"),
    +        // local function inside a method
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.sql.SQLContext.org$apache$spark$sql$SQLContext$$needsConversion$1"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.sql.UDFRegistration.org$apache$spark$sql$UDFRegistration$$builder$24")
    +      ) ++ Seq(
    +        // SPARK-8479 Add numNonzeros and numActives to Matrix.
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Matrix.numNonzeros"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Matrix.numActives")
    +      ) ++ Seq(
    +        // SPARK-8914 Remove RDDApi
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.RDDApi")
    +      ) ++ Seq(
    +        // SPARK-7292 Provide operator to truncate lineage cheaply
    +        ProblemFilters.exclude[AbstractClassProblem](
    +          "org.apache.spark.rdd.RDDCheckpointData"),
    +        ProblemFilters.exclude[AbstractClassProblem](
    +          "org.apache.spark.rdd.CheckpointRDD")
    +      ) ++ Seq(
    +        // SPARK-8701 Add input metadata in the batch page.
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.streaming.scheduler.InputInfo$"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.streaming.scheduler.InputInfo")
    +      ) ++ Seq(
    +        // SPARK-6797 Support YARN modes for SparkR
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.api.r.PairwiseRRDD.this"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.api.r.RRDD.createRWorker"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.api.r.RRDD.this"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.api.r.StringRRDD.this"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.api.r.BaseRRDD.this")
    +      ) ++ Seq(
    +        // SPARK-7422 add argmax for sparse vectors
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Vector.argmax")
    +      ) ++ Seq(
    +        // SPARK-8906 Move all internal data source classes into execution.datasources
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.ResolvedDataSource"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PreInsertCastAndRename$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTableUsingAsSelect$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.InsertIntoDataSource$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.SqlNewHadoopPartition"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PartitioningUtils$PartitionValues$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DefaultWriterContainer"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PartitioningUtils$PartitionValues"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.RefreshTable$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTempTableUsing$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PartitionSpec"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DynamicPartitionWriterContainer"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTableUsingAsSelect"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.SqlNewHadoopRDD$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DescribeCommand$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PartitioningUtils$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.SqlNewHadoopRDD"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PreInsertCastAndRename"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.Partition$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.LogicalRelation$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PartitioningUtils"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.LogicalRelation"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.Partition"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.BaseWriterContainer"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PreWriteCheck"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTableUsing"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.RefreshTable"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.SqlNewHadoopRDD$NewHadoopMapPartitionsWithSplitRDD"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DataSourceStrategy$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTempTableUsing"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTempTableUsingAsSelect$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTempTableUsingAsSelect"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CreateTableUsing$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.ResolvedDataSource$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PreWriteCheck$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.InsertIntoDataSource"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.InsertIntoHadoopFsRelation"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DDLParser"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.CaseInsensitiveMap"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.InsertIntoHadoopFsRelation$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DataSourceStrategy"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.SqlNewHadoopRDD$NewHadoopMapPartitionsWithSplitRDD$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.PartitionSpec$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DescribeCommand"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.DDLException"),
    +        // SPARK-9763 Minimize exposure of internal SQL classes
    +        excludePackage("org.apache.spark.sql.parquet"),
    +        excludePackage("org.apache.spark.sql.json"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRDD$DecimalConversion$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCPartition"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JdbcUtils$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRDD$DecimalConversion"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCPartitioningInfo$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCPartition$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.package"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRDD$JDBCConversion"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRDD$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.package$DriverWrapper"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRDD"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCPartitioningInfo"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JdbcUtils"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.DefaultSource"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRelation$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.package$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.JDBCRelation")
    +      ) ++ Seq(
    +        // SPARK-4751 Dynamic allocation for standalone mode
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.SparkContext.supportDynamicAllocation")
    +      ) ++ Seq(
    +        // SPARK-9580: Remove SQL test singletons
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.test.LocalSQLContext$SQLSession"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.test.LocalSQLContext"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.test.TestSQLContext"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.test.TestSQLContext$")
    +      ) ++ Seq(
    +        // SPARK-9704 Made ProbabilisticClassifier, Identifiable, VectorUDT public APIs
    +        ProblemFilters.exclude[IncompatibleResultTypeProblem](
    +          "org.apache.spark.mllib.linalg.VectorUDT.serialize")
    +      )
     
    -        case v if v.startsWith("1.4") =>
    -          Seq(
    -            MimaBuild.excludeSparkPackage("deploy"),
    -            MimaBuild.excludeSparkPackage("ml"),
    -            // SPARK-7910 Adding a method to get the partitioner to JavaRDD
    -            ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.partitioner"),
    -            // SPARK-5922 Adding a generalized diff(other: RDD[(VertexId, VD)]) to VertexRDD
    -            ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.graphx.VertexRDD.diff"),
    -            // These are needed if checking against the sbt build, since they are part of
    -            // the maven-generated artifacts in 1.3.
    -            excludePackage("org.spark-project.jetty"),
    -            MimaBuild.excludeSparkPackage("unused"),
    -            ProblemFilters.exclude[MissingClassProblem]("com.google.common.base.Optional"),
    -            ProblemFilters.exclude[IncompatibleResultTypeProblem](
    -              "org.apache.spark.rdd.JdbcRDD.compute"),
    -            ProblemFilters.exclude[IncompatibleResultTypeProblem](
    -              "org.apache.spark.broadcast.HttpBroadcastFactory.newBroadcast"),
    -            ProblemFilters.exclude[IncompatibleResultTypeProblem](
    -              "org.apache.spark.broadcast.TorrentBroadcastFactory.newBroadcast"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.scheduler.OutputCommitCoordinator$OutputCommitCoordinatorEndpoint")
    -          ) ++ Seq(
    -            // SPARK-4655 - Making Stage an Abstract class broke binary compatibility even though
    -            // the stage class is defined as private[spark]
    -            ProblemFilters.exclude[AbstractClassProblem]("org.apache.spark.scheduler.Stage")
    -          ) ++ Seq(
    -            // SPARK-6510 Add a Graph#minus method acting as Set#difference
    -            ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.graphx.VertexRDD.minus")
    -          ) ++ Seq(
    -            // SPARK-6492 Fix deadlock in SparkContext.stop()
    -            ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.org$" +
    -                "apache$spark$SparkContext$$SPARK_CONTEXT_CONSTRUCTOR_LOCK")
    -          ) ++ Seq(
    -            // SPARK-6693 add toString with max lines and width for matrix
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Matrix.toString")
    -          ) ++ Seq(
    -            // SPARK-6703 Add getOrCreate method to SparkContext
    -            ProblemFilters.exclude[IncompatibleResultTypeProblem]
    -                ("org.apache.spark.SparkContext.org$apache$spark$SparkContext$$activeContext")
    -          ) ++ Seq(
    -            // SPARK-7090 Introduce LDAOptimizer to LDA to further improve extensibility
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.mllib.clustering.LDA$EMOptimizer")
    -          ) ++ Seq(
    -            // SPARK-6756 add toSparse, toDense, numActives, numNonzeros, and compressed to Vector
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Vector.compressed"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Vector.toDense"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Vector.numNonzeros"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Vector.toSparse"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Vector.numActives"),
    -            // SPARK-7681 add SparseVector support for gemv
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Matrix.multiply"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.DenseMatrix.multiply"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.SparseMatrix.multiply")
    -          ) ++ Seq(
    -            // Execution should never be included as it's always internal.
    -            MimaBuild.excludeSparkPackage("sql.execution"),
    -            // This `protected[sql]` method was removed in 1.3.1
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.sql.SQLContext.checkAnalysis"),
    -            // These `private[sql]` classes were removed in 1.4.0:
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.execution.AddExchange"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.execution.AddExchange$"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.parquet.PartitionSpec"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.parquet.PartitionSpec$"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.parquet.Partition"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.parquet.Partition$"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.parquet.ParquetRelation2$PartitionValues"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.parquet.ParquetRelation2$PartitionValues$"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.parquet.ParquetRelation2"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.parquet.ParquetRelation2$"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.parquet.ParquetRelation2$MetadataCache"),
    -            // These test support classes were moved out of src/main and into src/test:
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.parquet.ParquetTestData"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.parquet.ParquetTestData$"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.parquet.TestGroupWriteSupport"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.CachedData"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.CachedData$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.CacheManager"),
    -            // TODO: Remove the following rule once ParquetTest has been moved to src/test.
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.sql.parquet.ParquetTest")
    -          ) ++ Seq(
    -            // SPARK-7530 Added StreamingContext.getState()
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.streaming.StreamingContext.state_=")
    -          ) ++ Seq(
    -            // SPARK-7081 changed ShuffleWriter from a trait to an abstract class and removed some
    -            // unnecessary type bounds in order to fix some compiler warnings that occurred when
    -            // implementing this interface in Java. Note that ShuffleWriter is private[spark].
    -            ProblemFilters.exclude[IncompatibleTemplateDefProblem](
    -              "org.apache.spark.shuffle.ShuffleWriter")
    -          ) ++ Seq(
    -            // SPARK-6888 make jdbc driver handling user definable
    -            // This patch renames some classes to API friendly names.
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.DriverQuirks$"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.DriverQuirks"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.PostgresQuirks"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.NoQuirks"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.MySQLQuirks")
    -          )
    +    case v if v.startsWith("1.4") =>
    +      Seq(
    +        MimaBuild.excludeSparkPackage("deploy"),
    +        MimaBuild.excludeSparkPackage("ml"),
    +        // SPARK-7910 Adding a method to get the partitioner to JavaRDD
    +        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.partitioner"),
    +        // SPARK-5922 Adding a generalized diff(other: RDD[(VertexId, VD)]) to VertexRDD
    +        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.graphx.VertexRDD.diff"),
    +        // These are needed if checking against the sbt build, since they are part of
    +        // the maven-generated artifacts in 1.3.
    +        excludePackage("org.spark-project.jetty"),
    +        MimaBuild.excludeSparkPackage("unused"),
    +        ProblemFilters.exclude[MissingClassProblem]("com.google.common.base.Optional"),
    +        ProblemFilters.exclude[IncompatibleResultTypeProblem](
    +          "org.apache.spark.rdd.JdbcRDD.compute"),
    +        ProblemFilters.exclude[IncompatibleResultTypeProblem](
    +          "org.apache.spark.broadcast.HttpBroadcastFactory.newBroadcast"),
    +        ProblemFilters.exclude[IncompatibleResultTypeProblem](
    +          "org.apache.spark.broadcast.TorrentBroadcastFactory.newBroadcast"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.scheduler.OutputCommitCoordinator$OutputCommitCoordinatorEndpoint")
    +      ) ++ Seq(
    +        // SPARK-4655 - Making Stage an Abstract class broke binary compatibility even though
    +        // the stage class is defined as private[spark]
    +        ProblemFilters.exclude[AbstractClassProblem]("org.apache.spark.scheduler.Stage")
    +      ) ++ Seq(
    +        // SPARK-6510 Add a Graph#minus method acting as Set#difference
    +        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.graphx.VertexRDD.minus")
    +      ) ++ Seq(
    +        // SPARK-6492 Fix deadlock in SparkContext.stop()
    +        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.org$" +
    +            "apache$spark$SparkContext$$SPARK_CONTEXT_CONSTRUCTOR_LOCK")
    +      ) ++ Seq(
    +        // SPARK-6693 add toString with max lines and width for matrix
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Matrix.toString")
    +      ) ++ Seq(
    +        // SPARK-6703 Add getOrCreate method to SparkContext
    +        ProblemFilters.exclude[IncompatibleResultTypeProblem]
    +            ("org.apache.spark.SparkContext.org$apache$spark$SparkContext$$activeContext")
    +      ) ++ Seq(
    +        // SPARK-7090 Introduce LDAOptimizer to LDA to further improve extensibility
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.mllib.clustering.LDA$EMOptimizer")
    +      ) ++ Seq(
    +        // SPARK-6756 add toSparse, toDense, numActives, numNonzeros, and compressed to Vector
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Vector.compressed"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Vector.toDense"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Vector.numNonzeros"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Vector.toSparse"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Vector.numActives"),
    +        // SPARK-7681 add SparseVector support for gemv
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Matrix.multiply"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.DenseMatrix.multiply"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.SparseMatrix.multiply")
    +      ) ++ Seq(
    +        // Execution should never be included as it's always internal.
    +        MimaBuild.excludeSparkPackage("sql.execution"),
    +        // This `protected[sql]` method was removed in 1.3.1
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.sql.SQLContext.checkAnalysis"),
    +        // These `private[sql]` classes were removed in 1.4.0:
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.execution.AddExchange"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.execution.AddExchange$"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.parquet.PartitionSpec"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.parquet.PartitionSpec$"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.parquet.Partition"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.parquet.Partition$"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.parquet.ParquetRelation2$PartitionValues"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.parquet.ParquetRelation2$PartitionValues$"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.parquet.ParquetRelation2"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.parquet.ParquetRelation2$"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.parquet.ParquetRelation2$MetadataCache"),
    +        // These test support classes were moved out of src/main and into src/test:
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.parquet.ParquetTestData"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.parquet.ParquetTestData$"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.parquet.TestGroupWriteSupport"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.CachedData"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.CachedData$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.CacheManager"),
    +        // TODO: Remove the following rule once ParquetTest has been moved to src/test.
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.sql.parquet.ParquetTest")
    +      ) ++ Seq(
    +        // SPARK-7530 Added StreamingContext.getState()
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.streaming.StreamingContext.state_=")
    +      ) ++ Seq(
    +        // SPARK-7081 changed ShuffleWriter from a trait to an abstract class and removed some
    +        // unnecessary type bounds in order to fix some compiler warnings that occurred when
    +        // implementing this interface in Java. Note that ShuffleWriter is private[spark].
    +        ProblemFilters.exclude[IncompatibleTemplateDefProblem](
    +          "org.apache.spark.shuffle.ShuffleWriter")
    +      ) ++ Seq(
    +        // SPARK-6888 make jdbc driver handling user definable
    +        // This patch renames some classes to API friendly names.
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.DriverQuirks$"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.DriverQuirks"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.PostgresQuirks"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.NoQuirks"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.jdbc.MySQLQuirks")
    +      )
     
    -        case v if v.startsWith("1.3") =>
    -          Seq(
    -            MimaBuild.excludeSparkPackage("deploy"),
    -            MimaBuild.excludeSparkPackage("ml"),
    -            // These are needed if checking against the sbt build, since they are part of
    -            // the maven-generated artifacts in the 1.2 build.
    -            MimaBuild.excludeSparkPackage("unused"),
    -            ProblemFilters.exclude[MissingClassProblem]("com.google.common.base.Optional")
    -          ) ++ Seq(
    -            // SPARK-2321
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.SparkStageInfoImpl.this"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.SparkStageInfo.submissionTime")
    -          ) ++ Seq(
    -            // SPARK-4614
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Matrices.randn"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Matrices.rand")
    -          ) ++ Seq(
    -            // SPARK-5321
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.SparseMatrix.transposeMultiply"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Matrix.transpose"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.DenseMatrix.transposeMultiply"),
    -            ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.mllib.linalg.Matrix." +
    -                "org$apache$spark$mllib$linalg$Matrix$_setter_$isTransposed_="),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Matrix.isTransposed"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.linalg.Matrix.foreachActive")
    -          ) ++ Seq(
    -            // SPARK-5540
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.solveLeastSquares"),
    -            // SPARK-5536
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$^dateFeatures"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$^dateBlock")
    -          ) ++ Seq(
    -            // SPARK-3325
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.streaming.api.java.JavaDStreamLike.print"),
    -            // SPARK-2757
    -            ProblemFilters.exclude[IncompatibleResultTypeProblem](
    -              "org.apache.spark.streaming.flume.sink.SparkAvroCallbackHandler." +
    -                "removeAndGetProcessor")
    -          ) ++ Seq(
    -            // SPARK-5123 (SparkSQL data type change) - alpha component only
    -            ProblemFilters.exclude[IncompatibleResultTypeProblem](
    -              "org.apache.spark.ml.feature.HashingTF.outputDataType"),
    -            ProblemFilters.exclude[IncompatibleResultTypeProblem](
    -              "org.apache.spark.ml.feature.Tokenizer.outputDataType"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem](
    -              "org.apache.spark.ml.feature.Tokenizer.validateInputType"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem](
    -              "org.apache.spark.ml.classification.LogisticRegressionModel.validateAndTransformSchema"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem](
    -              "org.apache.spark.ml.classification.LogisticRegression.validateAndTransformSchema")
    -          ) ++ Seq(
    -            // SPARK-4014
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.TaskContext.taskAttemptId"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.TaskContext.attemptNumber")
    -          ) ++ Seq(
    -            // SPARK-5166 Spark SQL API stabilization
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Transformer.transform"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Estimator.fit"),
    -            ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.ml.Transformer.transform"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Pipeline.fit"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.PipelineModel.transform"),
    -            ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.ml.Estimator.fit"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Evaluator.evaluate"),
    -            ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.ml.Evaluator.evaluate"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.tuning.CrossValidator.fit"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.tuning.CrossValidatorModel.transform"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.StandardScaler.fit"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.StandardScalerModel.transform"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.LogisticRegressionModel.transform"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.LogisticRegression.fit"),
    -            ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.evaluation.BinaryClassificationEvaluator.evaluate")
    -          ) ++ Seq(
    -            // SPARK-5270
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.java.JavaRDDLike.isEmpty")
    -          ) ++ Seq(
    -            // SPARK-5430
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.java.JavaRDDLike.treeReduce"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.java.JavaRDDLike.treeAggregate")
    -          ) ++ Seq(
    -            // SPARK-5297 Java FileStream does not work with custom key/values
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.streaming.api.java.JavaStreamingContext.fileStream")
    -          ) ++ Seq(
    -            // SPARK-5315 Spark Streaming Java API returns Scala DStream
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.streaming.api.java.JavaDStreamLike.reduceByWindow")
    -          ) ++ Seq(
    -            // SPARK-5461 Graph should have isCheckpointed, getCheckpointFiles methods
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.graphx.Graph.getCheckpointFiles"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.graphx.Graph.isCheckpointed")
    -          ) ++ Seq(
    -            // SPARK-4789 Standardize ML Prediction APIs
    -            ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.mllib.linalg.VectorUDT"),
    -            ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.mllib.linalg.VectorUDT.serialize"),
    -            ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.mllib.linalg.VectorUDT.sqlType")
    -          ) ++ Seq(
    -            // SPARK-5814
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$wrapDoubleArray"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$fillFullMatrix"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$iterations"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$makeOutLinkBlock"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$computeYtY"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$makeLinkRDDs"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$alpha"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$randomFactor"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$makeInLinkBlock"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$dspr"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$lambda"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$implicitPrefs"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$rank")
    -          ) ++ Seq(
    -            // SPARK-4682
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.RealClock"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.Clock"),
    -            ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.TestClock")
    -          ) ++ Seq(
    -            // SPARK-5922 Adding a generalized diff(other: RDD[(VertexId, VD)]) to VertexRDD
    -            ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.graphx.VertexRDD.diff")
    -          )
    +    case v if v.startsWith("1.3") =>
    +      Seq(
    +        MimaBuild.excludeSparkPackage("deploy"),
    +        MimaBuild.excludeSparkPackage("ml"),
    +        // These are needed if checking against the sbt build, since they are part of
    +        // the maven-generated artifacts in the 1.2 build.
    +        MimaBuild.excludeSparkPackage("unused"),
    +        ProblemFilters.exclude[MissingClassProblem]("com.google.common.base.Optional")
    +      ) ++ Seq(
    +        // SPARK-2321
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.SparkStageInfoImpl.this"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.SparkStageInfo.submissionTime")
    +      ) ++ Seq(
    +        // SPARK-4614
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Matrices.randn"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Matrices.rand")
    +      ) ++ Seq(
    +        // SPARK-5321
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.SparseMatrix.transposeMultiply"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Matrix.transpose"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.DenseMatrix.transposeMultiply"),
    +        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.mllib.linalg.Matrix." +
    +            "org$apache$spark$mllib$linalg$Matrix$_setter_$isTransposed_="),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Matrix.isTransposed"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.linalg.Matrix.foreachActive")
    +      ) ++ Seq(
    +        // SPARK-5540
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.solveLeastSquares"),
    +        // SPARK-5536
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$^dateFeatures"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          
"org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$^dateBlock")
    +      ) ++ Seq(
    +        // SPARK-3325
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.streaming.api.java.JavaDStreamLike.print"),
    +        // SPARK-2757
    +        ProblemFilters.exclude[IncompatibleResultTypeProblem](
    +          "org.apache.spark.streaming.flume.sink.SparkAvroCallbackHandler." +
    +            "removeAndGetProcessor")
    +      ) ++ Seq(
    +        // SPARK-5123 (SparkSQL data type change) - alpha component only
    +        ProblemFilters.exclude[IncompatibleResultTypeProblem](
    +          "org.apache.spark.ml.feature.HashingTF.outputDataType"),
    +        ProblemFilters.exclude[IncompatibleResultTypeProblem](
    +          "org.apache.spark.ml.feature.Tokenizer.outputDataType"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem](
    +          "org.apache.spark.ml.feature.Tokenizer.validateInputType"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem](
    +          "org.apache.spark.ml.classification.LogisticRegressionModel.validateAndTransformSchema"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem](
    +          "org.apache.spark.ml.classification.LogisticRegression.validateAndTransformSchema")
    +      ) ++ Seq(
    +        // SPARK-4014
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.TaskContext.taskAttemptId"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.TaskContext.attemptNumber")
    +      ) ++ Seq(
    +        // SPARK-5166 Spark SQL API stabilization
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Transformer.transform"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Estimator.fit"),
    +        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.ml.Transformer.transform"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Pipeline.fit"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.PipelineModel.transform"),
    +        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.ml.Estimator.fit"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Evaluator.evaluate"),
    +        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.ml.Evaluator.evaluate"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.tuning.CrossValidator.fit"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.tuning.CrossValidatorModel.transform"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.StandardScaler.fit"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.StandardScalerModel.transform"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.LogisticRegressionModel.transform"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.LogisticRegression.fit"),
    +        ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.evaluation.BinaryClassificationEvaluator.evaluate")
    +      ) ++ Seq(
    +        // SPARK-5270
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.api.java.JavaRDDLike.isEmpty")
    +      ) ++ Seq(
    +        // SPARK-5430
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.api.java.JavaRDDLike.treeReduce"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.api.java.JavaRDDLike.treeAggregate")
    +      ) ++ Seq(
    +        // SPARK-5297 Java FileStream do not work with custom key/values
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.streaming.api.java.JavaStreamingContext.fileStream")
    +      ) ++ Seq(
    +        // SPARK-5315 Spark Streaming Java API returns Scala DStream
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.streaming.api.java.JavaDStreamLike.reduceByWindow")
    +      ) ++ Seq(
    +        // SPARK-5461 Graph should have isCheckpointed, getCheckpointFiles methods
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.graphx.Graph.getCheckpointFiles"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.graphx.Graph.isCheckpointed")
    +      ) ++ Seq(
    +        // SPARK-4789 Standardize ML Prediction APIs
    +        ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.mllib.linalg.VectorUDT"),
    +        ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.mllib.linalg.VectorUDT.serialize"),
    +        ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.mllib.linalg.VectorUDT.sqlType")
    +      ) ++ Seq(
    +        // SPARK-5814
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$wrapDoubleArray"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$fillFullMatrix"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$iterations"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$makeOutLinkBlock"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$computeYtY"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$makeLinkRDDs"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$alpha"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$randomFactor"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$makeInLinkBlock"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$dspr"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$lambda"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$implicitPrefs"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$rank")
    +      ) ++ Seq(
    +        // SPARK-4682
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.RealClock"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.Clock"),
    +        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.TestClock")
    +      ) ++ Seq(
    +        // SPARK-5922 Adding a generalized diff(other: RDD[(VertexId, VD)]) to VertexRDD
    +        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.graphx.VertexRDD.diff")
    +      )
     
    -        case v if v.startsWith("1.2") =>
    -          Seq(
    -            MimaBuild.excludeSparkPackage("deploy"),
    -            MimaBuild.excludeSparkPackage("graphx")
    -          ) ++
    -          MimaBuild.excludeSparkClass("mllib.linalg.Matrix") ++
    -          MimaBuild.excludeSparkClass("mllib.linalg.Vector") ++
    -          Seq(
    -            ProblemFilters.exclude[IncompatibleTemplateDefProblem](
    -              "org.apache.spark.scheduler.TaskLocation"),
    -            // Added normL1 and normL2 to trait MultivariateStatisticalSummary
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.stat.MultivariateStatisticalSummary.normL1"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.mllib.stat.MultivariateStatisticalSummary.normL2"),
    -            // MapStatus should be private[spark]
    -            ProblemFilters.exclude[IncompatibleTemplateDefProblem](
    -              "org.apache.spark.scheduler.MapStatus"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.network.netty.PathResolver"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.spark.network.netty.client.BlockClientListener"),
    +    case v if v.startsWith("1.2") =>
    +      Seq(
    +        MimaBuild.excludeSparkPackage("deploy"),
    +        MimaBuild.excludeSparkPackage("graphx")
    +      ) ++
    +      MimaBuild.excludeSparkClass("mllib.linalg.Matrix") ++
    +      MimaBuild.excludeSparkClass("mllib.linalg.Vector") ++
    +      Seq(
    +        ProblemFilters.exclude[IncompatibleTemplateDefProblem](
    +          "org.apache.spark.scheduler.TaskLocation"),
    +        // Added normL1 and normL2 to trait MultivariateStatisticalSummary
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.stat.MultivariateStatisticalSummary.normL1"),
    +        ProblemFilters.exclude[MissingMethodProblem](
    +          "org.apache.spark.mllib.stat.MultivariateStatisticalSummary.normL2"),
    +        // MapStatus should be private[spark]
    +        ProblemFilters.exclude[IncompatibleTemplateDefProblem](
    +          "org.apache.spark.scheduler.MapStatus"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.network.netty.PathResolver"),
    +        ProblemFilters.exclude[MissingClassProblem](
    +          "org.apache.spark.network.netty.client.BlockClientListener"),
     
    -            // TaskContext was promoted to Abstract class
    -            ProblemFilters.exclude[AbstractClassProblem](
    -              "org.apache.spark.TaskContext"),
    -            ProblemFilters.exclude[IncompatibleTemplateDefProblem](
    -              "org.apache.spark.util.collection.SortDataFormat")
    -          ) ++ Seq(
    -            // Adding new methods to the JavaRDDLike trait:
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.java.JavaRDDLike.takeAsync"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.java.JavaRDDLike.foreachPartitionAsync"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.java.JavaRDDLike.countAsync"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.java.JavaRDDLike.foreachAsync"),
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.api.java.JavaRDDLike.collectAsync")
    -          ) ++ Seq(
    -            // SPARK-3822
    -            ProblemFilters.exclude[IncompatibleResultTypeProblem](
    -              "org.apache.spark.SparkContext.org$apache$spark$SparkContext$$createTaskScheduler")
    -          ) ++ Seq(
    -            // SPARK-1209
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.hadoop.mapreduce.SparkHadoopMapReduceUtil"),
    -            ProblemFilters.exclude[MissingClassProblem](
    -              "org.apache.hadoop.mapred.SparkHadoopMapRedUtil"),
    -            ProblemFilters.exclude[MissingTypesProblem](
    -              "org.apache.spark.rdd.PairRDDFunctions")
    -          ) ++ Seq(
    -            // SPARK-4062
    -            ProblemFilters.exclude[MissingMethodProblem](
    -              "org.apache.spark.streaming.kafka.KafkaReceiver#MessageHandler.this")
    -          )
    +        // TaskContext was promoted to Abstract class
    +        ProblemFilters.exclude[AbstractClassProblem](
    +          "org.apache.spark.TaskContext"),
    +        Pr
    --- End diff ---
    
    line end
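    (Aside, for readers skimming the diff: each entry above is a MiMa problem
    filter that suppresses one reported binary incompatibility against the
    previous release. A minimal sketch of the pattern follows; the class and
    method names in it are illustrative placeholders, not entries from this
    file.)
    
        import com.typesafe.tools.mima.core._
        import com.typesafe.tools.mima.core.ProblemFilters._
    
        // Hypothetical example: suppress MiMa's report that a method was
        // removed from a class. "SomeClass.someMethod" is a placeholder.
        val exampleExcludes = Seq(
          ProblemFilters.exclude[MissingMethodProblem](
            "org.apache.spark.example.SomeClass.someMethod")
        )
    
    (Sequences of filters like this are what excludes(version) returns for
    each release branch, which the build then feeds to the MiMa plugin.)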

