Github user MaxGekk commented on a diff in the pull request:
https://github.com/apache/spark/pull/20937#discussion_r179743195
--- Diff: sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala ---
@@ -2127,4 +2127,243 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(df.schema === expectedSchema)
}
}
+
+  def testFile(fileName: String): String = {
+    Thread.currentThread().getContextClassLoader.getResource(fileName).toString
+  }
+
+ test("SPARK-23723: json in UTF-16 with BOM") {
+ val fileName = "json-tests/utf16WithBOM.json"
+    val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
+ val jsonDF = spark.read.schema(schema)
+ .option("multiline", "true")
+ .option("encoding", "UTF-16")
+ .json(testFile(fileName))
+
+ checkAnswer(jsonDF, Seq(
+ Row("Chris", "Baird"), Row("Doug", "Rood")
+ ))
+ }
+
+ test("SPARK-23723: multi-line json in UTF-32BE with BOM") {
+ val fileName = "json-tests/utf32BEWithBOM.json"
+    val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
+ val jsonDF = spark.read.schema(schema)
+ .option("multiline", "true")
+ .json(testFile(fileName))
+
+ checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
+ }
+
+  test("SPARK-23723: Use user's encoding in reading of multi-line json in UTF-16LE") {
+ val fileName = "json-tests/utf16LE.json"
+    val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
+ val jsonDF = spark.read.schema(schema)
+ .option("multiline", "true")
+ .options(Map("encoding" -> "UTF-16LE"))
+ .json(testFile(fileName))
+
+ checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
+ }
+
+ test("SPARK-23723: Unsupported encoding name") {
+ val invalidCharset = "UTF-128"
+ val exception = intercept[java.io.UnsupportedEncodingException] {
+ spark.read
+ .options(Map("encoding" -> invalidCharset, "lineSep" -> "\n"))
+ .json(testFile("json-tests/utf16LE.json"))
+ .count()
+ }
+
+ assert(exception.getMessage.contains(invalidCharset))
+ }
+
+ test("SPARK-23723: checking that the encoding option is case agnostic") {
+ val fileName = "json-tests/utf16LE.json"
+    val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
+ val jsonDF = spark.read.schema(schema)
+ .option("multiline", "true")
+ .options(Map("encoding" -> "uTf-16lE"))
+ .json(testFile(fileName))
+
+ checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
+ }
+
+
+  test("SPARK-23723: specified encoding is not matched to actual encoding") {
+ val fileName = "json-tests/utf16LE.json"
+    val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
+ val exception = intercept[SparkException] {
+ spark.read.schema(schema)
+ .option("mode", "FAILFAST")
+ .option("multiline", "true")
+ .options(Map("encoding" -> "UTF-16BE"))
+ .json(testFile(fileName))
+ .count()
+ }
+ val errMsg = exception.getMessage
+
+    assert(errMsg.contains("Malformed records are detected in record parsing"))
+ }
+
+ def checkCharset(
+ expectedCharset: String,
+ pathToJsonFiles: String,
+ expectedContent: String
+ ): Unit = {
+ val jsonFiles = new File(pathToJsonFiles)
+ .listFiles()
+ .filter(_.isFile)
+ .filter(_.getName.endsWith("json"))
+ val jsonContent = jsonFiles.map { file =>
+ scala.io.Source.fromFile(file, expectedCharset).mkString
+ }
+ val cleanedContent = jsonContent
+ .mkString
+ .trim
+ .replaceAll(" ", "")
+
+ assert(cleanedContent == expectedContent)
+ }
+
+ test("SPARK-23723: save json in UTF-32BE") {
+ val encoding = "UTF-32BE"
+ withTempPath { path =>
+ val df = spark.createDataset(Seq(("Dog", 42)))
+ df.write
+ .options(Map("encoding" -> encoding, "lineSep" -> "\n"))
+ .format("json").mode("overwrite")
+ .save(path.getCanonicalPath)
+
+ checkCharset(
+ expectedCharset = encoding,
+ pathToJsonFiles = path.getCanonicalPath,
+ expectedContent = """{"_1":"Dog","_2":42}"""
+ )
+ }
+ }
+
+ test("SPARK-23723: save json in default encoding - UTF-8") {
+ withTempPath { path =>
+ val df = spark.createDataset(Seq(("Dog", 42)))
+ df.write
+ .format("json").mode("overwrite")
+ .save(path.getCanonicalPath)
+
+ checkCharset(
+ expectedCharset = "UTF-8",
+ pathToJsonFiles = path.getCanonicalPath,
+ expectedContent = """{"_1":"Dog","_2":42}"""
+ )
+ }
+ }
+
+ test("SPARK-23723: wrong output encoding") {
+ val encoding = "UTF-128"
+ val exception = intercept[java.io.UnsupportedEncodingException] {
+ withTempPath { path =>
+ val df = spark.createDataset(Seq((0)))
+ df.write
+ .options(Map("encoding" -> encoding, "lineSep" -> "\n"))
+ .format("json").mode("overwrite")
+ .save(path.getCanonicalPath)
+ }
+ }
+
+ assert(exception.getMessage == encoding)
+ }
+
+ test("SPARK-23723: read written json in UTF-16LE") {
+ val options = Map("encoding" -> "UTF-16LE", "lineSep" -> "\n")
+ withTempPath { path =>
+ val ds = spark.createDataset(Seq(
+ ("a", 1), ("b", 2), ("c", 3))
+ ).repartition(2)
+ ds.write
+ .options(options)
+ .format("json").mode("overwrite")
+ .save(path.getCanonicalPath)
+ val savedDf = spark
+ .read
+ .options(options)
+ .json(path.getCanonicalPath)
+
+ checkAnswer(savedDf.toDF(), ds.toDF())
+ }
+ }
+
+ def checkReadJson(
+ lineSep: String,
+ encodingOption: String,
+ encoding: String,
+ inferSchema: Boolean,
+ runId: Int
--- End diff ---
but it is very convenient for looking up failed tests. If I put an invisible
lineSep in the test title, how would I search for the failed tests?
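
One way a separator could stay searchable in a title would be to escape it into a printable form first. A minimal sketch of such a hypothetical helper (not part of this PR):

```scala
// Sketch only: render a lineSep in a readable, greppable form for test titles,
// escaping control and non-ASCII characters.
def readableSep(lineSep: String): String = lineSep.flatMap {
  case '\n' => "\\n"
  case '\r' => "\\r"
  case '\t' => "\\t"
  case c if c >= ' ' && c <= '~' => c.toString  // printable ASCII stays as-is
  case c => f"\\u${c.toInt}%04x"                // e.g. U+2028 -> "\u2028"
}

// readableSep("\n")     == "\\n"
// readableSep("\u2028") == "\\u2028"
```

A title built as s"SPARK-23723: lineSep=${readableSep(lineSep)}" would then remain greppable even for invisible separators, at the cost of one extra helper; the runId approach avoids that machinery.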
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]