Github user gatorsmile commented on a diff in the pull request:

    https://github.com/apache/spark/pull/21415#discussion_r190660661
  
    --- Diff: 
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
 ---
    @@ -1383,4 +1385,31 @@ class CSVSuite extends QueryTest with 
SharedSQLContext with SQLTestUtils with Te
     
         checkAnswer(ds, Seq(Row(""" "a" """)))
       }
    +
    +  test("SPARK-24244: Select a subset of all columns") {
    +    withTempPath { path =>
    +      import collection.JavaConverters._
    +      val schema = new StructType()
    +        .add("f1", IntegerType).add("f2", IntegerType).add("f3", 
IntegerType)
    +        .add("f4", IntegerType).add("f5", IntegerType).add("f6", 
IntegerType)
    +        .add("f7", IntegerType).add("f8", IntegerType).add("f9", 
IntegerType)
    +        .add("f10", IntegerType).add("f11", IntegerType).add("f12", 
IntegerType)
    +        .add("f13", IntegerType).add("f14", IntegerType).add("f15", 
IntegerType)
    +
    +      val odf = spark.createDataFrame(List(
    +        Row(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
    +        Row(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, 
-15)
    +      ).asJava, schema)
    +      odf.write.csv(path.getCanonicalPath)
    +      val idf = spark.read
    +        .schema(schema)
    +        .csv(path.getCanonicalPath)
    +        .select('f15, 'f10, 'f5)
    --- End diff --
    
    Could you add an extreme test case that tries `count(1)` on CSV files? That 
means zero columns are required.


---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to