LuciferYang commented on code in PR #42860:
URL: https://github.com/apache/spark/pull/42860#discussion_r1322315517
##########
python/pyspark/sql/functions.py:
##########
@@ -12377,31 +12377,138 @@ def explode(col: "ColumnOrName") -> Column:
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
- target column to work on.
+ Target column to work on.
Returns
-------
:class:`~pyspark.sql.Column`
- one row per array item or map key value.
+ One row per array item or map key value.
See Also
--------
:meth:`pyspark.functions.posexplode`
:meth:`pyspark.functions.explode_outer`
:meth:`pyspark.functions.posexplode_outer`
+ Notes
+ -----
+ Only one explode is allowed per SELECT clause.
+
Examples
--------
+ Example 1: Exploding an array column
+
+ >>> import pyspark.sql.functions as sf
>>> from pyspark.sql import Row
- >>> df = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
- >>> df.select(explode(df.intlist).alias("anInt")).collect()
- [Row(anInt=1), Row(anInt=2), Row(anInt=3)]
+ >>> df = spark.createDataFrame([Row(id=1, values=[1, 2, 3])])
+ >>> df.select(sf.explode(df.values).alias("value")).show()
+ +-----+
+ |value|
+ +-----+
+ | 1|
+ | 2|
+ | 3|
+ +-----+
+
+ Example 2: Exploding a map column
- >>> df.select(explode(df.mapfield).alias("key", "value")).show()
+ >>> import pyspark.sql.functions as sf
+ >>> from pyspark.sql import Row
+ >>> df = spark.createDataFrame([Row(id=1, values={"a": "b", "c": "d"})])
+ >>> df.select(sf.explode(df.values).alias("key", "value")).show()
+---+-----+
|key|value|
+---+-----+
| a| b|
+ | c| d|
+ +---+-----+
+
+ Example 3: Exploding an array column with multiple rows
+
+ >>> import pyspark.sql.functions as sf
+ >>> from pyspark.sql import Row
+ >>> df = spark.createDataFrame(
+ ... [Row(id=1, values=[1, 2]), Row(id=2, values=[3, 4])])
+ >>> df.select("id", sf.explode(df.values).alias("value")).show()
+ +---+-----+
+ | id|value|
+ +---+-----+
+ | 1| 1|
+ | 1| 2|
+ | 2| 3|
+ | 2| 4|
+ +---+-----+
+
+ Example 4: Exploding a map column with multiple rows
+
+ >>> import pyspark.sql.functions as sf
+ >>> from pyspark.sql import Row
+ >>> df = spark.createDataFrame([
+ ... Row(id=1, values={"a": "b", "c": "d"}),
+ ... Row(id=2, values={"e": "f", "g": "h"})
+ ... ])
+ >>> df.select("id", sf.explode(df.values).alias("key", "value")).show()
+ +---+---+-----+
+ | id|key|value|
+ +---+---+-----+
+ | 1| a| b|
+ | 1| c| d|
+ | 2| e| f|
+ | 2| g| h|
+ +---+---+-----+
+
+ Example 5: Exploding multiple array columns
+
+ >>> import pyspark.sql.functions as sf
+ >>> from pyspark.sql import Row
+ >>> df = spark.createDataFrame([Row(a=1, list1=[1, 2], list2=[3, 4])])
+ >>> df.select(sf.explode(df.list1).alias("list1"), "list2") \
+ ... .select("list1", sf.explode(df.list2).alias("list2")).show()
Review Comment:
   It seems the test failure is caused by this example: the trailing backslash line continuation in the doctest (Example 5) is not handled by doctest, producing the `SyntaxError: invalid syntax` shown below. The continuation should use the `...` PS2 prompt alone (or wrap the whole expression in parentheses) instead of a backslash.
```
**********************************************************************
File "/__w/spark/spark/python/pyspark/sql/functions.py", line 286, in
pyspark.sql.functions.explode
Failed example:
df.select(sf.explode(df.list1).alias("list1"), "list2") ...
.select("list1", sf.explode(df.list2).alias("list2")).show()
Exception raised:
Traceback (most recent call last):
File "/usr/local/pypy/pypy3.8/lib/pypy3.8/doctest.py", line 1338, in
__run
exec(compile(example.source, filename, "single",
File "<doctest pyspark.sql.functions.explode[19]>", line 1
df.select(sf.explode(df.list1).alias("list1"), "list2") ...
.select("list1", sf.explode(df.list2).alias("list2")).show()
^
SyntaxError: invalid syntax
**********************************************************************
1 of 33 in pyspark.sql.functions.explode
***Test Failed*** 1 failures.
/usr/local/pypy/pypy3.8/lib/pypy3.8/runpy.py:127: RuntimeWarning:
'pyspark.sql.functions' found in sys.modules after import of package
'pyspark.sql', but prior to execution of 'pyspark.sql.functions'; this may
result in unpredictable behaviour
warn(RuntimeWarning(msg))
/__w/spark/spark/python/pyspark/sql/udtf.py:163: UserWarning: Arrow
optimization for Python UDTFs cannot be enabled: PyArrow >= 4.0.0 must be
installed; however, it was not found.. Falling back to using regular Python
UDTFs.
warnings.warn(
Had test failures in pyspark.sql.functions with pypy3; see logs.
Error: running /__w/spark/spark/python/run-tests
--modules=pyspark-sql,pyspark-testing --parallelism=1 ; received return code 255
Error: Process completed with exit code 19.
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]