GitHub user liancheng commented on a diff in the pull request:
https://github.com/apache/spark/pull/12002#discussion_r57595515
--- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/DefaultSource.scala ---
@@ -91,6 +96,70 @@ class DefaultSource extends FileFormat with DataSourceRegister {
       new CSVOutputWriterFactory(csvOptions)
     }
 
+  override def buildReader(
+      sqlContext: SQLContext,
+      physicalSchema: StructType,
+      partitionSchema: StructType,
+      dataSchema: StructType,
+      filters: Seq[Filter],
+      options: Map[String, String]): (PartitionedFile) => Iterator[InternalRow] = {
+    val csvOptions = new CSVOptions(options)
+    val headers = dataSchema.fields.map(_.name)
+
+    val conf = new Configuration(sqlContext.sparkContext.hadoopConfiguration)
+    val broadcastedConf =
+      sqlContext.sparkContext.broadcast(new SerializableConfiguration(conf))
+
+    (file: PartitionedFile) => {
+      val fileSplit = {
+        val filePath = new Path(new URI(file.filePath))
+        new FileSplit(filePath, file.start, file.length, Array.empty)
+      }
+
+      val hadoopAttemptContext = {
+        val conf = broadcastedConf.value.value
+        val attemptID = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
+        new TaskAttemptContextImpl(conf, attemptID)
+      }
+
+      val reader = new LineRecordReader()
+      reader.initialize(fileSplit, hadoopAttemptContext)
+
+      val lineIterator = new RecordReaderIterator(reader).map { line =>
+        new String(line.getBytes, 0, line.getLength, csvOptions.charset)
+      }
+
+      // Skips the header line of each file if the `header` option is set to true.
+      // TODO What if the first partitioned file consists of only comments and empty lines?
+      if (csvOptions.headerFlag && file.start == 0) {
+        val nonEmptyLines = if (csvOptions.isCommentSet) {
+          val commentPrefix = csvOptions.comment.toString
+          lineIterator.dropWhile { line =>
+            line.trim.isEmpty || line.trim.startsWith(commentPrefix)
+          }
+        } else {
+          lineIterator.dropWhile(_.trim.isEmpty)
+        }
+
+        if (nonEmptyLines.hasNext) nonEmptyLines.drop(1)
+      }
+
+      val unsafeRowIterator = {
+        val tokenizedIterator = new BulkCsvReader(lineIterator, csvOptions, headers)
+        val parser = CSVRelation.csvParser(physicalSchema, dataSchema.fieldNames, csvOptions)
+        tokenizedIterator.flatMap(parser(_).toSeq)
+      }
+
+      // Appends partition values
+      val fullOutput = dataSchema.toAttributes ++ partitionSchema.toAttributes
+      val joinedRow = new JoinedRow()
+      val appendPartitionColumns = GenerateUnsafeProjection.generate(fullOutput, fullOutput)
+
+      unsafeRowIterator.map { dataRow =>
+        appendPartitionColumns(joinedRow(dataRow, file.partitionValues))
+      }
--- End diff --
Partition value appending is going to be extracted into a separate method
and reused in all data sources in follow-up PRs.
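
For illustration only, here is a rough sketch of what that shared helper could look like. The object and method names (`PartitionColumnSupport`, `appendPartitionValues`) and the exact signature are hypothetical, not code from this PR:

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.JoinedRow
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.types.StructType

// Hypothetical helper, not part of this PR: joins each data row with the
// partition values of the file it came from and projects the result to an
// UnsafeRow, so every FileFormat can call it instead of repeating this block.
object PartitionColumnSupport {
  def appendPartitionValues(
      dataSchema: StructType,
      partitionSchema: StructType,
      partitionValues: InternalRow,
      rows: Iterator[InternalRow]): Iterator[InternalRow] = {
    val fullOutput = dataSchema.toAttributes ++ partitionSchema.toAttributes
    val joinedRow = new JoinedRow()
    val appendPartitionColumns = GenerateUnsafeProjection.generate(fullOutput, fullOutput)
    rows.map(dataRow => appendPartitionColumns(joinedRow(dataRow, partitionValues)))
  }
}

With something like this in place, the tail of `buildReader` above would reduce to a single call, e.g. `PartitionColumnSupport.appendPartitionValues(dataSchema, partitionSchema, file.partitionValues, unsafeRowIterator)`.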