[ https://issues.apache.org/jira/browse/SPARK-21550?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16374218#comment-16374218 ]

Javier edited comment on SPARK-21550 at 2/23/18 10:59 AM:
----------------------------------------------------------

I still observe this behavior in 2.2.0 when approxQuantile is applied to a single column that contains only None values:


{code:java}
...
 File "/pyspark.zip/pyspark/sql/dataframe.py", line 1402, in approxQuantile
 File "/py4j-0.10.4-src.zip/py4j/java_gateway.py", line 1133, in __call__
 File "/pyspark.zip/pyspark/sql/utils.py", line 63, in deco
 File "/py4j-0.10.4-src.zip/py4j/protocol.py", line 319, in get_return_value
 py4j.protocol.Py4JJavaError: An error occurred while calling o202.approxQuantile.
 : java.util.NoSuchElementException: next on empty iterator
 at scala.collection.Iterator$$anon$2.next(Iterator.scala:39)
 at scala.collection.Iterator$$anon$2.next(Iterator.scala:37)
 at scala.collection.IndexedSeqLike$Elements.next(IndexedSeqLike.scala:63)
 at scala.collection.IterableLike$class.head(IterableLike.scala:107)
 at scala.collection.mutable.ArrayOps$ofRef.scala$collection$IndexedSeqOptimized$$super$head(ArrayOps.scala:186)
 at scala.collection.IndexedSeqOptimized$class.head(IndexedSeqOptimized.scala:126)
 at scala.collection.mutable.ArrayOps$ofRef.head(ArrayOps.scala:186)
 at scala.collection.TraversableLike$class.last(TraversableLike.scala:431)
 at scala.collection.mutable.ArrayOps$ofRef.scala$collection$IndexedSeqOptimized$$super$last(ArrayOps.scala:186)
 at scala.collection.IndexedSeqOptimized$class.last(IndexedSeqOptimized.scala:132)
 at scala.collection.mutable.ArrayOps$ofRef.last(ArrayOps.scala:186)
 at org.apache.spark.sql.catalyst.util.QuantileSummaries.query(QuantileSummaries.scala:207)
 at org.apache.spark.sql.execution.stat.StatFunctions$$anonfun$multipleApproxQuantiles$1$$anonfun$apply$1.apply$mcDD$sp(StatFunctions.scala:92)
 at org.apache.spark.sql.execution.stat.StatFunctions$$anonfun$multipleApproxQuantiles$1$$anonfun$apply$1.apply(StatFunctions.scala:92)
 at org.apache.spark.sql.execution.stat.StatFunctions$$anonfun$multipleApproxQuantiles$1$$anonfun$apply$1.apply(StatFunctions.scala:92)
 at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
 at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
 at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
 at scala.collection.mutable.WrappedArray.foreach(WrappedArray.scala:35)
 at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
 at scala.collection.AbstractTraversable.map(Traversable.scala:104)
 at org.apache.spark.sql.execution.stat.StatFunctions$$anonfun$multipleApproxQuantiles$1.apply(StatFunctions.scala:92)
 at org.apache.spark.sql.execution.stat.StatFunctions$$anonfun$multipleApproxQuantiles$1.apply(StatFunctions.scala:92)
 at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
 at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
 at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
 at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
 at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
 at scala.collection.mutable.ArrayOps$ofRef.map(ArrayOps.scala:186)
 at org.apache.spark.sql.execution.stat.StatFunctions$.multipleApproxQuantiles(StatFunctions.scala:92)
 at org.apache.spark.sql.DataFrameStatFunctions.approxQuantile(DataFrameStatFunctions.scala:73)
 at org.apache.spark.sql.DataFrameStatFunctions.approxQuantile(DataFrameStatFunctions.scala:84)
 at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
 at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
 at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
 at java.lang.reflect.Method.invoke(Method.java:498)
 at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
 at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
 at py4j.Gateway.invoke(Gateway.java:280)
 at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
 at py4j.commands.CallCommand.execute(CallCommand.java:79)
 at py4j.GatewayConnection.run(GatewayConnection.java:214)
 at java.lang.Thread.run(Thread.java:748){code}
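
For reference, here is a minimal sketch of the kind of call that triggers the trace above (the column name, values, and local SparkSession setup are illustrative, not taken from the original job):

{code:python}
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, DoubleType

# Illustrative local session; any Spark session on an affected version behaves the same.
spark = SparkSession.builder.master("local[1]").appName("SPARK-21550-repro").getOrCreate()

# A single nullable column that holds only None values.
schema = StructType([StructField("x", DoubleType(), nullable=True)])
df = spark.createDataFrame([(None,), (None,), (None,)], schema)

# On affected versions this raises Py4JJavaError ("next on empty iterator")
# instead of returning an empty list as documented.
print(df.approxQuantile("x", [0.5], 0.001))
{code}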
 



> approxQuantiles throws "next on empty iterator" on empty data
> -------------------------------------------------------------
>
>                 Key: SPARK-21550
>                 URL: https://issues.apache.org/jira/browse/SPARK-21550
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>    Affects Versions: 2.1.0
>            Reporter: peay
>            Priority: Major
>             Fix For: 2.2.0
>
>
> The documentation says:
> {code}
> null and NaN values will be removed from the numerical column before calculation.
> If the dataframe is empty or the column only contains null or NaN, an empty array is returned.
> {code}
> However, this small pyspark example
> {code}
> sql_context.range(10).filter(col("id") == 42).approxQuantile("id", [0.99], 0.001)
> {code}
> throws
> {code}
> Py4JJavaError: An error occurred while calling o3493.approxQuantile.
> : java.util.NoSuchElementException: next on empty iterator
>       at scala.collection.Iterator$$anon$2.next(Iterator.scala:39)
>       at scala.collection.Iterator$$anon$2.next(Iterator.scala:37)
>       at scala.collection.IndexedSeqLike$Elements.next(IndexedSeqLike.scala:63)
>       at scala.collection.IterableLike$class.head(IterableLike.scala:107)
>       at scala.collection.mutable.ArrayOps$ofRef.scala$collection$IndexedSeqOptimized$$super$head(ArrayOps.scala:186)
>       at scala.collection.IndexedSeqOptimized$class.head(IndexedSeqOptimized.scala:126)
>       at scala.collection.mutable.ArrayOps$ofRef.head(ArrayOps.scala:186)
>       at scala.collection.TraversableLike$class.last(TraversableLike.scala:431)
>       at scala.collection.mutable.ArrayOps$ofRef.scala$collection$IndexedSeqOptimized$$super$last(ArrayOps.scala:186)
>       at scala.collection.IndexedSeqOptimized$class.last(IndexedSeqOptimized.scala:132)
>       at scala.collection.mutable.ArrayOps$ofRef.last(ArrayOps.scala:186)
>       at org.apache.spark.sql.catalyst.util.QuantileSummaries.query(QuantileSummaries.scala:207)
>       at org.apache.spark.sql.execution.stat.StatFunctions$$anonfun$multipleApproxQuantiles$1$$anonfun$apply$1.apply$mcDD$sp(StatFunctions.scala:92)
>       at org.apache.spark.sql.execution.stat.StatFunctions$$anonfun$multipleApproxQuantiles$1$$anonfun$apply$1.apply(StatFunctions.scala:92)
>       at org.apache.spark.sql.execution.stat.StatFunctions$$anonfun$multipleApproxQuantiles$1$$anonfun$apply$1.apply(StatFunctions.scala:92)
> {code}
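
For anyone hitting this on versions without the fix, a possible workaround is to guard the call yourself. The helper below is only a sketch (the name safe_approx_quantile is made up, and NaN filtering is omitted for brevity); it returns the documented empty result instead of letting the JVM call fail:

{code:python}
from pyspark.sql.functions import col

def safe_approx_quantile(df, column, probabilities, relative_error):
    # Drop null rows first, as approxQuantile is documented to do internally.
    non_null = df.filter(col(column).isNotNull())
    # If nothing is left, mimic the documented behaviour and return an empty list
    # rather than triggering "next on empty iterator" on affected versions.
    if non_null.rdd.isEmpty():
        return []
    return non_null.approxQuantile(column, probabilities, relative_error)
{code}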


