Github user cloud-fan commented on a diff in the pull request:

    https://github.com/apache/spark/pull/19464#discussion_r144604878
  
    --- Diff: core/src/test/scala/org/apache/spark/FileSuite.scala ---
    @@ -510,4 +510,87 @@ class FileSuite extends SparkFunSuite with LocalSparkContext {
         }
       }
     
    +  test("spark.files.ignoreEmptySplits work correctly (old Hadoop API)") {
    +    val conf = new SparkConf()
    +    conf.setAppName("test").setMaster("local").set(IGNORE_EMPTY_SPLITS, true)
    +    sc = new SparkContext(conf)
    +
    +    def testIgnoreEmptySplits(
    +      data: Array[Tuple2[String, String]],
    +      actualPartitionNum: Int,
    +      expectedPart: String,
    +      expectedPartitionNum: Int): Unit = {
    +      val output = new File(tempDir, "output")
    +      sc.parallelize(data, actualPartitionNum)
    +        .saveAsHadoopFile[TextOutputFormat[String, String]](output.getPath)
    +      assert(new File(output, expectedPart).exists() === true)
    --- End diff ---
    
    I don't think we need the `expectedPart` parameter; just:
    ```
    for (i <- 0 until actualPartitionNum) {
      assert(new File(output, s"part-0000$i").exists() === true)
    }
    ```
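
    For reference, a rough sketch of the whole helper with that change applied might look like this (the read-back assertion and the cleanup at the end are assumptions on my part, since the diff above is truncated):
    ```
    def testIgnoreEmptySplits(
        data: Array[(String, String)],
        actualPartitionNum: Int,
        expectedPartitionNum: Int): Unit = {
      val output = new File(tempDir, "output")
      sc.parallelize(data, actualPartitionNum)
        .saveAsHadoopFile[TextOutputFormat[String, String]](output.getPath)
      // Each of the partitions we wrote should have produced a part file.
      for (i <- 0 until actualPartitionNum) {
        assert(new File(output, s"part-0000$i").exists() === true)
      }
      // Assumed: with IGNORE_EMPTY_SPLITS set, reading the files back should
      // skip the empty ones, leaving expectedPartitionNum partitions.
      val readBack = sc.textFile(new File(output, "part-*").getPath)
      assert(readBack.partitions.length === expectedPartitionNum)
      Utils.deleteRecursively(output)
    }
    ```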

