Github user NathanHowell commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16386#discussion_r100640620
  
    --- Diff: 
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonParser.scala
 ---
    @@ -48,69 +47,98 @@ class JacksonParser(
     
       // A `ValueConverter` is responsible for converting a value from 
`JsonParser`
       // to a value in a field for `InternalRow`.
    -  private type ValueConverter = (JsonParser) => Any
    +  private type ValueConverter = JsonParser => AnyRef
     
       // `ValueConverter`s for the root schema for all fields in the schema
    -  private val rootConverter: ValueConverter = makeRootConverter(schema)
    +  private val rootConverter = makeRootConverter(schema)
     
       private val factory = new JsonFactory()
       options.setJacksonOptions(factory)
     
       private val emptyRow: Seq[InternalRow] = Seq(new 
GenericInternalRow(schema.length))
     
    +  private val corruptFieldIndex = 
schema.getFieldIndex(options.columnNameOfCorruptRecord)
    +
    +  @transient
    +  private val printWarningForMalformedRecord = ExecuteOnce[() => 
UTF8String] { record =>
    +    def sampleRecord: String = {
    +      if (options.wholeFile) {
    +        ""
    +      } else {
    +        s"Sample record: ${record()}\n"
    +      }
    +    }
    +
    +    def footer: String = {
    +      s"""Code example to print all malformed records (scala):
    +         |===================================================
    +         |// The corrupted record exists in column 
${options.columnNameOfCorruptRecord}.
    +         |val parsedJson = spark.read.json("/path/to/json/file/test.json")
    +         |
    +       """.stripMargin
    +    }
    +
    +    if (options.permissive) {
    +      logWarning(
    +        s"""Found at least one malformed record. The JSON reader will 
replace
    +           |all malformed records with placeholder null in current 
$PERMISSIVE_MODE parser mode.
    +           |To find out which corrupted records have been replaced with 
null, please use the
    +           |default inferred schema instead of providing a custom schema.
    +           |
    +           |${sampleRecord ++ footer}
    +           |
    +         """.stripMargin)
    +    } else if (options.dropMalformed) {
    +      logWarning(
    +        s"""Found at least one malformed record. The JSON reader will drop
    +           |all malformed records in current $DROP_MALFORMED_MODE parser 
mode. To find out which
    +           |corrupted records have been dropped, please switch the parser 
mode to $PERMISSIVE_MODE
    +           |mode and use the default inferred schema.
    +           |
    +           |${sampleRecord ++ footer}
    +           |
    +         """.stripMargin)
    +    }
    +  }
    +
       @transient
    -  private[this] var isWarningPrintedForMalformedRecord: Boolean = false
    +  private val printWarningIfWholeFile = ExecuteOnce[Unit] { _ =>
    +    if (options.wholeFile && corruptFieldIndex.isDefined) {
    +      logWarning(
    +        s"""Enabling wholeFile mode and defining columnNameOfCorruptRecord 
may result
    +           |in very large allocations or OutOfMemoryExceptions being 
raised.
    +           |
    +         """.stripMargin)
    +    }
    +  }
     
       /**
        * This function deals with the cases it fails to parse. This function 
will be called
        * when exceptions are caught during converting. This functions also 
deals with `mode` option.
        */
    -  private def failedRecord(record: String): Seq[InternalRow] = {
    -    // create a row even if no corrupt record column is present
    -    if (options.failFast) {
    -      throw new SparkSQLJsonProcessingException(s"Malformed line in 
FAILFAST mode: $record")
    -    }
    -    if (options.dropMalformed) {
    -      if (!isWarningPrintedForMalformedRecord) {
    -        logWarning(
    -          s"""Found at least one malformed records (sample: $record). The 
JSON reader will drop
    -             |all malformed records in current $DROP_MALFORMED_MODE parser 
mode. To find out which
    -             |corrupted records have been dropped, please switch the 
parser mode to $PERMISSIVE_MODE
    -             |mode and use the default inferred schema.
    -             |
    -             |Code example to print all malformed records (scala):
    -             |===================================================
    -             |// The corrupted record exists in column 
${columnNameOfCorruptRecord}
    -             |val parsedJson = 
spark.read.json("/path/to/json/file/test.json")
    -             |
    -           """.stripMargin)
    -        isWarningPrintedForMalformedRecord = true
    -      }
    -      Nil
    -    } else if (schema.getFieldIndex(columnNameOfCorruptRecord).isEmpty) {
    -      if (!isWarningPrintedForMalformedRecord) {
    -        logWarning(
    -          s"""Found at least one malformed records (sample: $record). The 
JSON reader will replace
    -             |all malformed records with placeholder null in current 
$PERMISSIVE_MODE parser mode.
    -             |To find out which corrupted records have been replaced with 
null, please use the
    -             |default inferred schema instead of providing a custom schema.
    -             |
    -             |Code example to print all malformed records (scala):
    -             |===================================================
    -             |// The corrupted record exists in column 
${columnNameOfCorruptRecord}.
    -             |val parsedJson = 
spark.read.json("/path/to/json/file/test.json")
    -             |
    -           """.stripMargin)
    -        isWarningPrintedForMalformedRecord = true
    -      }
    -      emptyRow
    -    } else {
    -      val row = new GenericInternalRow(schema.length)
    -      for (corruptIndex <- 
schema.getFieldIndex(columnNameOfCorruptRecord)) {
    +  private def failedRecord(record: () => UTF8String): Seq[InternalRow] = {
    +    corruptFieldIndex match {
    +      case _ if options.failFast =>
    +        if (options.wholeFile) {
    +          throw new SparkSQLJsonProcessingException("Malformed line in 
FAILFAST mode")
    +        } else {
    +          throw new SparkSQLJsonProcessingException(s"Malformed line in 
FAILFAST mode: ${record()}")
    +        }
    +
    +      case _ if options.dropMalformed =>
    +        printWarningForMalformedRecord(record)
    +        Nil
    +
    +      case None =>
    +        printWarningForMalformedRecord(record)
    +        emptyRow
    +
    +      case Some(corruptIndex) =>
    +        printWarningIfWholeFile(())
    --- End diff --
    
    Could be — do you prefer this approach?
    
    ```diff
    --- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonParser.scala
    +++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonParser.scala
    @@ -60,7 +60,9 @@ class JacksonParser(
       private val corruptFieldIndex = 
schema.getFieldIndex(options.columnNameOfCorruptRecord)
    
       @transient
    -  private val printWarningForMalformedRecord = ExecuteOnce[() => 
UTF8String] { record =>
    +  private[this] var isWarningPrinted: Boolean = false
    +
    +  private def printWarningForMalformedRecord(record: () => UTF8String): 
Unit = {
         def sampleRecord: String = {
           if (options.wholeFile) {
             ""
    @@ -102,7 +104,7 @@ class JacksonParser(
       }
    
       @transient
    -  private val printWarningIfWholeFile = ExecuteOnce[Unit] { _ =>
    +  private def printWarningIfWholeFile(): Unit = {
         if (options.wholeFile && corruptFieldIndex.isDefined) {
           logWarning(
             s"""Enabling wholeFile mode and defining columnNameOfCorruptRecord 
may result
    @@ -126,15 +128,24 @@ class JacksonParser(
             }
    
           case _ if options.dropMalformed =>
    -        printWarningForMalformedRecord(record)
    +        if (!isWarningPrinted) {
    +          printWarningForMalformedRecord(record)
    +          isWarningPrinted = true
    +        }
             Nil
    
           case None =>
    -        printWarningForMalformedRecord(record)
    +        if (!isWarningPrinted) {
    +          printWarningForMalformedRecord(record)
    +          isWarningPrinted = true
    +        }
             emptyRow
    
           case Some(corruptIndex) =>
    -        printWarningIfWholeFile(())
    +        if (!isWarningPrinted) {
    +          printWarningIfWholeFile()
    +          isWarningPrinted = true
    +        }
             val row = new GenericInternalRow(schema.length)
             require(schema(corruptIndex).dataType == StringType)
             row.update(corruptIndex, record())
    ```


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to