srowen commented on a change in pull request #23417: [SPARK-26374][TEST][SQL] Enable TimestampFormatter in HadoopFsRelationTest
URL: https://github.com/apache/spark/pull/23417#discussion_r244622327
 
 

 ##########
 File path: sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
 ##########
 @@ -126,61 +126,60 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with Tes
     } else {
       Seq(false)
     }
-    // TODO: Support new parser too, see SPARK-26374.
-    withSQLConf(SQLConf.LEGACY_TIME_PARSER_ENABLED.key -> "true") {
-      for (dataType <- supportedDataTypes) {
-        for (parquetDictionaryEncodingEnabled <- parquetDictionaryEncodingEnabledConfs) {
-          val extraMessage = if (isParquetDataSource) {
-            s" with parquet.enable.dictionary = $parquetDictionaryEncodingEnabled"
-          } else {
-            ""
-          }
-          logInfo(s"Testing $dataType data type$extraMessage")
-
-          val extraOptions = Map[String, String](
-            "parquet.enable.dictionary" -> parquetDictionaryEncodingEnabled.toString
-          )
-
-          withTempPath { file =>
-            val path = file.getCanonicalPath
-
-            val seed = System.nanoTime()
-            withClue(s"Random data generated with the seed: ${seed}") {
-              val dataGenerator = RandomDataGenerator.forType(
-                dataType = dataType,
-                nullable = true,
-                new Random(seed)
-              ).getOrElse {
-                fail(s"Failed to create data generator for schema $dataType")
-              }
-
-              // Create a DF for the schema with random data. The index field is used to sort the
-              // DataFrame.  This is a workaround for SPARK-10591.
-              val schema = new StructType()
-                .add("index", IntegerType, nullable = false)
-                .add("col", dataType, nullable = true)
-              val rdd =
-                spark.sparkContext.parallelize((1 to 10).map(i => Row(i, dataGenerator())))
-              val df = spark.createDataFrame(rdd, schema).orderBy("index").coalesce(1)
-
-              df.write
-                .mode("overwrite")
-                .format(dataSourceName)
-                .option("dataSchema", df.schema.json)
-                .options(extraOptions)
-                .save(path)
-
-              val loadedDF = spark
-                .read
-                .format(dataSourceName)
-                .option("dataSchema", df.schema.json)
-                .schema(df.schema)
-                .options(extraOptions)
-                .load(path)
-                .orderBy("index")
-
-              checkAnswer(loadedDF, df)
+
+    for (dataType <- supportedDataTypes) {
+      for (parquetDictionaryEncodingEnabled <- parquetDictionaryEncodingEnabledConfs) {
+        val extraMessage = if (isParquetDataSource) {
+          s" with parquet.enable.dictionary = $parquetDictionaryEncodingEnabled"
+        } else {
+          ""
+        }
+        logInfo(s"Testing $dataType data type$extraMessage")
+
+        val extraOptions = Map[String, String](
+          "parquet.enable.dictionary" -> parquetDictionaryEncodingEnabled.toString,
+          "timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX"

 Review comment:
  OK, so I guess we have some custom code to handle the (slightly nonstandard, but important) timezone offset that includes seconds. The tests pass, so it's being handled correctly somewhere; I just wanted to understand why it works at all, to make sure I'm not missing something, since as you say it can't be using the pattern you added, which isn't valid for parsing. This sounds OK to me.
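
  For anyone following along, here is a minimal standalone sketch (plain java.time / java.text, not Spark code; the example timestamp is made up) of why the five-letter `XXXXX` offset works with the new formatter but was never valid for the legacy one:

      import java.time.OffsetDateTime
      import java.time.format.DateTimeFormatter

      // java.time accepts five 'X' letters: an ISO-8601 offset with optional
      // seconds, e.g. +01:30:15, usable for both formatting and parsing.
      val fmt = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX")
      val parsed = OffsetDateTime.parse("2019-01-01T00:00:00.123+01:30:15", fmt)

      // The legacy java.text.SimpleDateFormat caps 'X' at three letters
      // (offsets down to minutes only), so the same pattern cannot even be
      // constructed: new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX")
      // throws IllegalArgumentException at construction time.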

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 