This is an automated email from the ASF dual-hosted git repository.

yangjie01 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 9a6b27ecb15e [SPARK-46548][PYTHON][DOCS] Refine docstring of `get/array_zip/sort_array`
9a6b27ecb15e is described below

commit 9a6b27ecb15e656a34fe3b8744f23855548c830d
Author: yangjie01 <[email protected]>
AuthorDate: Sun Dec 31 15:47:22 2023 +0800

    [SPARK-46548][PYTHON][DOCS] Refine docstring of `get/array_zip/sort_array`
    
    ### What changes were proposed in this pull request?
    This PR refines the docstring of `get/array_zip/sort_array` and adds some new examples.
    
    ### Why are the changes needed?
    To improve PySpark documentation
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Pass GitHub Actions
    
    ### Was this patch authored or co-authored using generative AI tooling?
    No
    
    Closes #44545 from LuciferYang/array-functions-sort.
    
    Authored-by: yangjie01 <[email protected]>
    Signed-off-by: yangjie01 <[email protected]>
---
 python/pyspark/sql/functions/builtin.py | 198 ++++++++++++++++++++++++--------
 1 file changed, 150 insertions(+), 48 deletions(-)

diff --git a/python/pyspark/sql/functions/builtin.py b/python/pyspark/sql/functions/builtin.py
index 0ff1ee2a7394..458bb8c8feaf 100644
--- a/python/pyspark/sql/functions/builtin.py
+++ b/python/pyspark/sql/functions/builtin.py
@@ -12810,7 +12810,7 @@ def try_element_at(col: "ColumnOrName", extraction: "ColumnOrName") -> Column:
 @_try_remote_functions
 def get(col: "ColumnOrName", index: Union["ColumnOrName", int]) -> Column:
     """
-    Collection function: Returns element of array at given (0-based) index.
+    Array function: Returns the element of an array at the given (0-based) index.
     If the index points outside of the array boundaries, then this function
     returns NULL.
 
@@ -12819,18 +12819,18 @@ def get(col: "ColumnOrName", index: Union["ColumnOrName", int]) -> Column:
     Parameters
     ----------
     col : :class:`~pyspark.sql.Column` or str
-        name of column containing array
+        Name of the column containing the array.
     index : :class:`~pyspark.sql.Column` or str or int
-        index to check for in array
+        Index to check for in the array.
 
     Returns
     -------
     :class:`~pyspark.sql.Column`
-        value at given position.
+        Value at the given position.
 
     Notes
     -----
-    The position is not 1 based, but 0 based index.
+    The position is not 1-based, but 0-based index.
     Supports Spark Connect.
 
     See Also
@@ -12839,41 +12839,61 @@ def get(col: "ColumnOrName", index: Union["ColumnOrName", int]) -> Column:
 
     Examples
     --------
-    >>> df = spark.createDataFrame([(["a", "b", "c"], 1)], ['data', 'index'])
-    >>> df.select(get(df.data, 1)).show()
+    Example 1: Getting an element at a fixed position
+
+    >>> from pyspark.sql import functions as sf
+    >>> df = spark.createDataFrame([(["a", "b", "c"],)], ['data'])
+    >>> df.select(sf.get(df.data, 1)).show()
     +------------+
     |get(data, 1)|
     +------------+
     |           b|
     +------------+
 
-    >>> df.select(get(df.data, -1)).show()
-    +-------------+
-    |get(data, -1)|
-    +-------------+
-    |         NULL|
-    +-------------+
+    Example 2: Getting an element at a position outside the array boundaries
 
-    >>> df.select(get(df.data, 3)).show()
+    >>> from pyspark.sql import functions as sf
+    >>> df = spark.createDataFrame([(["a", "b", "c"],)], ['data'])
+    >>> df.select(sf.get(df.data, 3)).show()
     +------------+
     |get(data, 3)|
     +------------+
     |        NULL|
     +------------+
 
-    >>> df.select(get(df.data, "index")).show()
+    Example 3: Getting an element at a position specified by another column
+
+    >>> from pyspark.sql import functions as sf
+    >>> df = spark.createDataFrame([(["a", "b", "c"], 2)], ['data', 'index'])
+    >>> df.select(sf.get(df.data, df.index)).show()
     +----------------+
     |get(data, index)|
     +----------------+
-    |               b|
+    |               c|
     +----------------+
 
-    >>> df.select(get(df.data, col("index") - 1)).show()
+
+    Example 4: Getting an element at a position calculated from another column
+
+    >>> from pyspark.sql import functions as sf
+    >>> df = spark.createDataFrame([(["a", "b", "c"], 2)], ['data', 'index'])
+    >>> df.select(sf.get(df.data, df.index - 1)).show()
     +----------------------+
     |get(data, (index - 1))|
     +----------------------+
-    |                     a|
+    |                     b|
     +----------------------+
+
+    Example 5: Getting an element at a negative position
+
+    >>> from pyspark.sql import functions as sf
+    >>> df = spark.createDataFrame([(["a", "b", "c"], )], ['data'])
+    >>> df.select(sf.get(df.data, -1)).show()
+    +-------------+
+    |get(data, -1)|
+    +-------------+
+    |         NULL|
+    +-------------+
     """
     index = lit(index) if isinstance(index, int) else index
 
@@ -15064,7 +15084,7 @@ def cardinality(col: "ColumnOrName") -> Column:
 @_try_remote_functions
 def sort_array(col: "ColumnOrName", asc: bool = True) -> Column:
     """
-    Collection function: sorts the input array in ascending or descending order according
+    Array function: Sorts the input array in ascending or descending order according
     to the natural ordering of the array elements. Null elements will be placed at the beginning
     of the returned array in ascending order or at the end of the returned array in descending
     order.
@@ -15077,23 +15097,76 @@ def sort_array(col: "ColumnOrName", asc: bool = True) -> Column:
     Parameters
     ----------
     col : :class:`~pyspark.sql.Column` or str
-        name of column or expression
+        Name of the column or expression.
     asc : bool, optional
-        whether to sort in ascending or descending order. If `asc` is True (default)
-        then ascending and if False then descending.
+        Whether to sort in ascending or descending order. If `asc` is True (default),
+        then the sorting is in ascending order. If False, then in descending order.
 
     Returns
     -------
     :class:`~pyspark.sql.Column`
-        sorted array.
+        Sorted array.
 
     Examples
     --------
-    >>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
-    >>> df.select(sort_array(df.data).alias('r')).collect()
-    [Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])]
-    >>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
-    [Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
+    Example 1: Sorting an array in ascending order
+
+    >>> import pyspark.sql.functions as sf
+    >>> df = spark.createDataFrame([([2, 1, None, 3],)], ['data'])
+    >>> df.select(sf.sort_array(df.data)).show()
+    +----------------------+
+    |sort_array(data, true)|
+    +----------------------+
+    |       [NULL, 1, 2, 3]|
+    +----------------------+
+
+    Example 2: Sorting an array in descending order
+
+    >>> import pyspark.sql.functions as sf
+    >>> df = spark.createDataFrame([([2, 1, None, 3],)], ['data'])
+    >>> df.select(sf.sort_array(df.data, asc=False)).show()
+    +-----------------------+
+    |sort_array(data, false)|
+    +-----------------------+
+    |        [3, 2, 1, NULL]|
+    +-----------------------+
+
+    Example 3: Sorting an array with a single element
+
+    >>> import pyspark.sql.functions as sf
+    >>> df = spark.createDataFrame([([1],)], ['data'])
+    >>> df.select(sf.sort_array(df.data)).show()
+    +----------------------+
+    |sort_array(data, true)|
+    +----------------------+
+    |                   [1]|
+    +----------------------+
+
+    Example 4: Sorting an empty array
+
+    >>> from pyspark.sql import functions as sf
+    >>> from pyspark.sql.types import ArrayType, StringType, StructField, StructType
+    >>> schema = StructType([StructField("data", ArrayType(StringType()), True)])
+    >>> df = spark.createDataFrame([([],)], schema=schema)
+    >>> df.select(sf.sort_array(df.data)).show()
+    +----------------------+
+    |sort_array(data, true)|
+    +----------------------+
+    |                    []|
+    +----------------------+
+
+    Example 5: Sorting an array with null values
+
+    >>> from pyspark.sql import functions as sf
+    >>> from pyspark.sql.types import ArrayType, IntegerType, StructType, StructField
+    >>> schema = StructType([StructField("data", ArrayType(IntegerType()), True)])
+    >>> df = spark.createDataFrame([([None, None, None],)], schema=schema)
+    >>> df.select(sf.sort_array(df.data)).show()
+    +----------------------+
+    |sort_array(data, true)|
+    +----------------------+
+    |    [NULL, NULL, NULL]|
+    +----------------------+
     """
     return _invoke_function("sort_array", _to_java_column(col), asc)
 
@@ -15523,9 +15596,9 @@ def array_repeat(col: "ColumnOrName", count: Union["ColumnOrName", int]) -> Colu
 @_try_remote_functions
 def arrays_zip(*cols: "ColumnOrName") -> Column:
     """
-    Collection function: Returns a merged array of structs in which the N-th struct contains all
+    Array function: Returns a merged array of structs in which the N-th struct contains all
     N-th values of input arrays. If one of the arrays is shorter than others then
-    resulting struct type value will be a `null` for missing elements.
+    the resulting struct type value will be a `null` for missing elements.
 
     .. versionadded:: 2.4.0
 
@@ -15535,31 +15608,60 @@ def arrays_zip(*cols: "ColumnOrName") -> Column:
     Parameters
     ----------
     cols : :class:`~pyspark.sql.Column` or str
-        columns of arrays to be merged.
+        Columns of arrays to be merged.
 
     Returns
     -------
     :class:`~pyspark.sql.Column`
-        merged array of entries.
+        Merged array of entries.
 
     Examples
     --------
-    >>> from pyspark.sql.functions import arrays_zip
-    >>> df = spark.createDataFrame([([1, 2, 3], [2, 4, 6], [3, 6])], ['vals1', 'vals2', 'vals3'])
-    >>> df = df.select(arrays_zip(df.vals1, df.vals2, df.vals3).alias('zipped'))
-    >>> df.show(truncate=False)
-    +------------------------------------+
-    |zipped                              |
-    +------------------------------------+
-    |[{1, 2, 3}, {2, 4, 6}, {3, 6, NULL}]|
-    +------------------------------------+
-    >>> df.printSchema()
-    root
-     |-- zipped: array (nullable = true)
-     |    |-- element: struct (containsNull = false)
-     |    |    |-- vals1: long (nullable = true)
-     |    |    |-- vals2: long (nullable = true)
-     |    |    |-- vals3: long (nullable = true)
+    Example 1: Zipping two arrays of the same length
+
+    >>> from pyspark.sql import functions as sf
+    >>> df = spark.createDataFrame([([1, 2, 3], ['a', 'b', 'c'])], ['nums', 'letters'])
+    >>> df.select(sf.arrays_zip(df.nums, df.letters)).show(truncate=False)
+    +-------------------------+
+    |arrays_zip(nums, letters)|
+    +-------------------------+
+    |[{1, a}, {2, b}, {3, c}] |
+    +-------------------------+
+
+
+    Example 2: Zipping arrays of different lengths
+
+    >>> from pyspark.sql import functions as sf
+    >>> df = spark.createDataFrame([([1, 2], ['a', 'b', 'c'])], ['nums', 'letters'])
+    >>> df.select(sf.arrays_zip(df.nums, df.letters)).show(truncate=False)
+    +---------------------------+
+    |arrays_zip(nums, letters)  |
+    +---------------------------+
+    |[{1, a}, {2, b}, {NULL, c}]|
+    +---------------------------+
+
+    Example 3: Zipping more than two arrays
+
+    >>> from pyspark.sql import functions as sf
+    >>> df = spark.createDataFrame(
+    ...   [([1, 2], ['a', 'b'], [True, False])], ['nums', 'letters', 'bools'])
+    >>> df.select(sf.arrays_zip(df.nums, df.letters, df.bools)).show(truncate=False)
+    +--------------------------------+
+    |arrays_zip(nums, letters, bools)|
+    +--------------------------------+
+    |[{1, a, true}, {2, b, false}]   |
+    +--------------------------------+
+
+    Example 4: Zipping arrays with null values
+
+    >>> from pyspark.sql import functions as sf
+    >>> df = spark.createDataFrame([([1, 2, None], ['a', None, 'c'])], ['nums', 'letters'])
+    >>> df.select(sf.arrays_zip(df.nums, df.letters)).show(truncate=False)
+    +------------------------------+
+    |arrays_zip(nums, letters)     |
+    +------------------------------+
+    |[{1, a}, {2, NULL}, {NULL, c}]|
+    +------------------------------+
     """
     return _invoke_function_over_seq_of_columns("arrays_zip", cols)
 
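As a quick, self-contained illustration of the three refined functions (not part
of the commit; the session setup and sample data below are illustrative
assumptions, since the doctests above presume an already-running `spark`):

    from pyspark.sql import SparkSession
    from pyspark.sql import functions as sf

    spark = SparkSession.builder.appName("array-docstring-demo").getOrCreate()
    df = spark.createDataFrame([([3, 1, None, 2], ["a", "b"])], ["nums", "letters"])

    # get: 0-based lookup; out-of-range and negative indexes both yield NULL
    df.select(sf.get("nums", 1), sf.get("nums", -1)).show()

    # sort_array: ascending by default with NULLs first; descending puts NULLs last
    df.select(sf.sort_array("nums"), sf.sort_array("nums", asc=False)).show(truncate=False)

    # arrays_zip: missing slots of the shorter array are padded with NULL
    df.select(sf.arrays_zip("nums", "letters")).show(truncate=False)

    spark.stop()

The expected outputs match the doctest tables above: get returns 1 and NULL,
sort_array yields [NULL, 1, 2, 3] and [3, 2, 1, NULL], and arrays_zip pads the
shorter `letters` array with NULLs.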

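The same semantics are also reachable from Spark SQL, which can be handy when
checking docstring claims against the SQL built-ins. A minimal sketch, assuming
the same running session (the literals are illustrative):

    spark.sql("SELECT get(array('a', 'b', 'c'), 1)").show()
    spark.sql("SELECT sort_array(array(2, 1, NULL, 3))").show()
    spark.sql("SELECT arrays_zip(array(1, 2), array('a', 'b', 'c'))").show()

Each call mirrors one of the Python examples above: get(..., 1) returns 'b',
sort_array front-loads the NULL, and arrays_zip pads the shorter array.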

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
