GitHub user jinxing64 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/19652#discussion_r148749276
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala ---
    @@ -1485,21 +1487,27 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder(conf) {
     
           case null =>
             // Use default (serde) format.
    -        val name = conf.getConfString("hive.script.serde",
    -          "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")
    +        val name = if (isInFormat) {
    +          conf.getConfString("hive.script.serde",
    +            "org.apache.hadoop.hive.serde2.DelimitedJSONSerDe")
    +        } else {
    +          conf.getConfString("hive.script.serde",
    +            "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")
    +        }
             val props = Seq("field.delim" -> "\t")
             val recordHandler = Option(conf.getConfString(configKey, defaultConfigValue))
             (Nil, Option(name), props, recordHandler)
         }
     
    -    val (inFormat, inSerdeClass, inSerdeProps, reader) =
    +    val (inFormat, inSerdeClass, inSerdeProps, writer) =
    --- End diff ---
    
    `writer` writes records into the input stream of the script. Shouldn't it be initialized with the input format?
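    
    To illustrate the question, here is a minimal, self-contained sketch (plain Scala, not Spark's actual code) of the data flow: the handler built from the *input* row format acts as a writer because it serializes rows into the script's stdin, while the handler for the output row format reads rows back from the script's stdout. The `cat` command stands in for the user script and assumes a Unix-like environment:
    
        import java.io.{BufferedReader, InputStreamReader, PrintWriter}
    
        object ScriptTransformSketch {
          def main(args: Array[String]): Unit = {
            // Stand-in for the user script in TRANSFORM ... USING 'cat'.
            val process = new ProcessBuilder("cat").start()
    
            // "Record writer": serializes input rows into the script's stdin,
            // using the input row format (here, field.delim = "\t").
            val writer = new PrintWriter(process.getOutputStream)
            Seq("1\ta", "2\tb").foreach(row => writer.println(row))
            writer.close()
    
            // "Record reader": deserializes the script's stdout back into rows,
            // using the output row format.
            val reader = new BufferedReader(new InputStreamReader(process.getInputStream))
            Iterator.continually(reader.readLine()).takeWhile(_ != null).foreach(line => println(line))
            process.waitFor()
          }
        }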


---
