GitHub user pwendell commented on a diff in the pull request:
https://github.com/apache/incubator-spark/pull/585#discussion_r9890236
--- Diff: project/MimaBuild.scala ---
@@ -0,0 +1,105 @@
+import com.typesafe.tools.mima.plugin.MimaKeys.{binaryIssueFilters, previousArtifact}
+import com.typesafe.tools.mima.plugin.MimaPlugin.mimaDefaultSettings
+
+object MimaBuild {
+
+  val ignoredABIProblems = {
+    import com.typesafe.tools.mima.core._
+    import com.typesafe.tools.mima.core.ProblemFilters._
+    /**
+     * A: Detected members are semi-private or likely to become semi-private at some point.
+     */
+    Seq(exclude[MissingClassProblem]("org.apache.spark.util.XORShiftRandom"),
+      exclude[MissingClassProblem]("org.apache.spark.util.XORShiftRandom$"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.Utils.cloneWritables"),
+      // Scheduler is not considered a public API.
+      excludePackage("org.apache.spark.deploy"),
+      // Was made private in 1.0
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.nextItem_="),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.objectsRead"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.cleanup"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.objectsRead_="),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.fileStream"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.deserializeStream_="),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.compressedStream"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.nextItem"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.deserializeStream"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.bufferedStream"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.readNextItem"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.eof"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.eof_="),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.this"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#ExternalIterator.StreamBuffer"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#ExternalIterator.sortedMap"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#ExternalIterator.getMorePairs"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#ExternalIterator.mergeIfKeyExists"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#ExternalIterator.mergeHeap"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#ExternalIterator.inputStreams"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap.org$apache$spark$util$collection$ExternalAppendOnlyMap$$wrapForCompression$1"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap.org$apache$spark$util$collection$ExternalAppendOnlyMap$$sparkConf"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.shouldCompress"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.compressionCodec"),
+      exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaPairRDD.cogroupResultToJava"),
+      exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaPairRDD.groupByResultToJava"),
+      exclude[IncompatibleMethTypeProblem]("org.apache.spark.scheduler.TaskSchedulerImpl.handleFailedTask"),
+      exclude[MissingMethodProblem]("org.apache.spark.scheduler.TaskSchedulerImpl.taskSetTaskIds"),
+      exclude[IncompatibleMethTypeProblem]("org.apache.spark.scheduler.TaskSetManager.handleFailedTask"),
+      exclude[MissingMethodProblem]("org.apache.spark.scheduler.TaskSetManager.removeAllRunningTasks"),
+      exclude[MissingMethodProblem]("org.apache.spark.scheduler.TaskSetManager.runningTasks_="),
+      exclude[MissingMethodProblem]("org.apache.spark.scheduler.DAGScheduler.lastFetchFailureTime"),
+      exclude[MissingMethodProblem]("org.apache.spark.scheduler.DAGScheduler.lastFetchFailureTime_="),
+      exclude[MissingMethodProblem]("org.apache.spark.storage.BlockObjectWriter.bytesWritten")) ++
+    /**
+     * B: Detections are mostly false positives.
+     */
+    Seq(exclude[MissingMethodProblem]("org.apache.spark.SparkContext.newAPIHadoopFile$default$6"),
--- End diff --
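
For context, the quoted diff cuts off before showing where ignoredABIProblems
is consumed. A filter list like this is typically appended to the
binaryIssueFilters key that the file imports at the top. A minimal sketch,
assuming the standard MiMa plugin wiring (the object name, the stand-in
filter list, and the mimaSettings helper here are illustrative, not part of
the patch):

    import sbt._
    import com.typesafe.tools.mima.plugin.MimaKeys.binaryIssueFilters
    import com.typesafe.tools.mima.plugin.MimaPlugin.mimaDefaultSettings

    object MimaBuildWiring {
      // Hypothetical stand-in for the ignoredABIProblems list in the diff above.
      val ignoredABIProblems = {
        import com.typesafe.tools.mima.core._
        import com.typesafe.tools.mima.core.ProblemFilters._
        Seq(excludePackage("org.apache.spark.deploy"))
      }

      // mimaDefaultSettings enables the plugin; appending the exclusions to
      // binaryIssueFilters keeps those detections out of the report produced
      // by the mima-report-binary-issues task.
      def mimaSettings: Seq[Setting[_]] = mimaDefaultSettings ++ Seq(
        binaryIssueFilters ++= ignoredABIProblems
      )
    }
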
@ScrapCodes I think you might be pulling in a locally published version of
0.9.0-incubating that was based on an older build, because none of these Hadoop
methods have changed between master and 0.9.0. To test this, I removed all of
these exclusions and the MiMa check still passed. Maybe try clearing your Ivy
cache and local repository?
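
For anyone reproducing this: the baseline MiMa diffs the current classfiles
against is whatever previousArtifact resolves to, and sbt's default resolvers
consult ~/.ivy2/local before remote repositories, so a stale locally published
artifact shadows the real release. A minimal build.sbt sketch, with the
artifact coordinates assumed for illustration:

    import com.typesafe.tools.mima.plugin.MimaKeys.previousArtifact

    // The baseline for the binary-compatibility diff. If a stale
    // 0.9.0-incubating build sits in ~/.ivy2/local, Ivy resolves that copy
    // instead of the published release, and every difference from the stale
    // jar surfaces as a binary issue. Deleting ~/.ivy2/local/org.apache.spark
    // (and the matching entries under ~/.ivy2/cache) forces a fresh
    // resolution of the published artifact.
    previousArtifact := Some("org.apache.spark" % "spark-core_2.10" % "0.9.0-incubating")
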