Github user viirya commented on a diff in the pull request:

    https://github.com/apache/spark/pull/19492#discussion_r144790680
  
    --- Diff: 
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala
 ---
    @@ -536,26 +536,31 @@ case class JsonToStructs(
           timeZoneId = None)
     
       override def checkInputDataTypes(): TypeCheckResult = schema match {
    -    case _: StructType | ArrayType(_: StructType, _) =>
    +    case _: StructType | ArrayType(_: StructType | _: AtomicType, _) =>
           super.checkInputDataTypes()
         case _ => TypeCheckResult.TypeCheckFailure(
    -      s"Input schema ${schema.simpleString} must be a struct or an array 
of structs.")
    +      s"Input schema ${schema.simpleString} must be a struct or " +
    +        s"an array of structs or primitive types.")
       }
     
       @transient
    -  lazy val rowSchema = schema match {
    +  lazy val rowSchema: DataType = schema match {
    --- End diff --
    
    I think it was named `rowSchema` because it could only be a `StructType` 
before. It is not the schema of the input/output row.


---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to