Github user MaxGekk commented on a diff in the pull request:
https://github.com/apache/spark/pull/20894#discussion_r188635172
--- Diff:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVDataSource.scala
---
@@ -118,6 +120,61 @@ object CSVDataSource {
TextInputCSVDataSource
}
}
+
+ def checkHeaderColumnNames(
+ schema: StructType,
+ columnNames: Array[String],
+ fileName: String,
+ checkHeaderFlag: Boolean
+ ): Unit = {
+ if (checkHeaderFlag && columnNames != null) {
+ val fieldNames = schema.map(_.name).toIndexedSeq
+ val (headerLen, schemaSize) = (columnNames.size, fieldNames.length)
+ var error: Option[String] = None
+
+ if (headerLen == schemaSize) {
+ var i = 0
+ while (error.isEmpty && i < headerLen) {
+ val nameInSchema = fieldNames(i).toLowerCase
+ val nameInHeader = columnNames(i).toLowerCase
+ if (nameInHeader != nameInSchema) {
+ error = Some(
+ s"""|CSV file header does not contain the expected fields.
+ | Header: ${columnNames.mkString(", ")}
+ | Schema: ${fieldNames.mkString(", ")}
+ |Expected: $nameInSchema but found: $nameInHeader
+ |CSV file: $fileName""".stripMargin
+ )
+ }
+ i += 1
+ }
+ } else {
+ error = Some(
+ s"""|Number of columns in CSV header is not equal to number of
fields in the schema:
+ | Header length: $headerLen, schema size: $schemaSize
+ |CSV file: $fileName""".stripMargin
+ )
+ }
+
+ error.headOption.foreach { msg =>
+ throw new IllegalArgumentException(msg)
+ }
+ }
+ }
+
+ def checkHeader(
--- End diff --
The `checkHeader` method is used in the `csv` method of DataFrameReader. I think
the `CSVDataSource` object can encapsulate the checking.
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]