GitHub user jackylk commented on a diff in the pull request:

    https://github.com/apache/carbondata/pull/1876#discussion_r164652321
  
    --- Diff: 
integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
 ---
    @@ -588,15 +576,13 @@ case class CarbonLoadDataCommand(
             }
             val len = rowDataTypes.length
             var rdd =
    -          new NewHadoopRDD[NullWritable, StringArrayWritable](
    -            sparkSession.sparkContext,
    -            classOf[CSVInputFormat],
    -            classOf[NullWritable],
    -            classOf[StringArrayWritable],
    -            jobConf).map { case (key, value) =>
    +          DataLoadingUtil.csvFileScanRDD(
    +            sparkSession,
    +            model = carbonLoadModel,
    +            hadoopConf).map { row =>
    --- End diff --
    
    move `.map` to the next line


---

Reply via email to respond to this comment.