MaxGekk commented on a change in pull request #27366: [SPARK-30648][SQL] Support filters pushdown in JSON datasource
URL: https://github.com/apache/spark/pull/27366#discussion_r373475841
 
 

 ##########
 File path: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonParser.scala
 ##########
 @@ -334,15 +336,19 @@ class JacksonParser(
   private def convertObject(
       parser: JsonParser,
       schema: StructType,
-      fieldConverters: Array[ValueConverter]): InternalRow = {
+      fieldConverters: Array[ValueConverter],
+      structFilters: StructFilters = new NoopFilters()): Option[InternalRow] = {
     val row = new GenericInternalRow(schema.length)
     var badRecordException: Option[Throwable] = None
+    var skipRow = false
 
-    while (nextUntil(parser, JsonToken.END_OBJECT)) {
+    structFilters.reset()
+    while (!skipRow && nextUntil(parser, JsonToken.END_OBJECT)) {
       schema.getFieldIndex(parser.getCurrentName) match {
         case Some(index) =>
           try {
             row.update(index, fieldConverters(index).apply(parser))
+            skipRow = structFilters.skipRow(row, index)
 
 Review comment:
   > Now this method works differently for root row and nested row. I wonder if we can decouple the filtering logic out of this method ...
   
   Initially, I just duplicated the code, but in this commit I collapsed it into one method: https://github.com/apache/spark/pull/27366/commits/94a22e1cbbc3fc5ca5d87f6b08c8b5adc121a7b8
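
   For illustration, here is a minimal, self-contained sketch of the pattern shown in the diff above: one convert method serves both the root row (which receives the real pushed-down filters) and nested rows (which fall back to a `NoopFilters` default, so their behavior is unchanged). Note that `SimpleRow`, `GtFilter`, and `convertFields` are simplified stand-ins invented for this sketch, not the actual Spark classes.

   ```scala
   object FilterPushdownSketch extends App {
     // A row is just a mutable array of field values in this toy model.
     final case class SimpleRow(values: Array[Any])

     trait StructFilters {
       // Called once per row, before any field is set.
       def reset(): Unit
       // Called right after field `index` is set; returns true if the row
       // can already be dropped without converting the remaining fields.
       def skipRow(row: SimpleRow, index: Int): Boolean
     }

     // Default for nested structs: never skips a row, so nested conversion
     // behaves exactly as it did before filters were introduced.
     final class NoopFilters extends StructFilters {
       override def reset(): Unit = ()
       override def skipRow(row: SimpleRow, index: Int): Boolean = false
     }

     // Example pushed-down predicate: keep rows where field `idx` > threshold.
     final class GtFilter(idx: Int, threshold: Int) extends StructFilters {
       override def reset(): Unit = ()
       override def skipRow(row: SimpleRow, index: Int): Boolean =
         index == idx && row.values(idx).asInstanceOf[Int] <= threshold
     }

     // Analogue of convertObject: fills the row field by field and bails out
     // early once the filters decide the row cannot match.
     def convertFields(
         fields: Array[Any],
         structFilters: StructFilters = new NoopFilters()): Option[SimpleRow] = {
       val row = SimpleRow(new Array[Any](fields.length))
       structFilters.reset()
       var skipRow = false
       var i = 0
       while (!skipRow && i < fields.length) {
         row.values(i) = fields(i)
         skipRow = structFilters.skipRow(row, i)
         i += 1
       }
       if (skipRow) None else Some(row)
     }

     // The root-level call passes the real filters; nested calls rely on the
     // NoopFilters default and are never filtered.
     assert(convertFields(Array(1, "a"), new GtFilter(0, 5)).isEmpty)   // skipped
     assert(convertFields(Array(10, "b"), new GtFilter(0, 5)).isDefined) // kept
     println("ok")
   }
   ```

   The default parameter keeps the nested-struct call sites untouched while the root call opts in to filtering, which is what allows the duplicated method to be collapsed into one.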
