zhengruifeng commented on code in PR #41444:
URL: https://github.com/apache/spark/pull/41444#discussion_r1222325771
##########
python/pyspark/sql/tests/test_functions.py:
##########
@@ -709,6 +709,52 @@ def test_overlay(self):
message_parameters={"arg_name": "len", "arg_type": "float"},
)
+ def test_percentile(self):
+ actual = list(
+ chain.from_iterable(
+ [
+ re.findall("(percentile\\(.*\\))", str(x))
+ for x in [
+ F.percentile(F.col("foo"), F.lit(0.5)),
+ F.percentile(F.col("bar"), 0.25, 2),
+ F.percentile(F.col("bar"), [0.25, 0.5, 0.75]),
+ F.percentile(F.col("foo"), (0.05, 0.95), 100),
+ F.percentile("foo", 0.5),
+ F.percentile("bar", [0.1, 0.9], F.lit(10)),
+ ]
+ ]
+ )
+ )
+
+ expected = [
+ "percentile(foo, 0.5, 1)",
+ "percentile(bar, 0.25, 2)",
+ "percentile(bar, array(0.25, 0.5, 0.75), 1)",
+ "percentile(foo, array(0.05, 0.95), 100)",
+ "percentile(foo, 0.5, 1)",
+ "percentile(bar, array(0.1, 0.9), 10)",
+ ]
+
+ self.assertListEqual(actual, expected)
+
+ def test_median(self):
Review Comment:
Why test `median`? It already exists:
https://github.com/apache/spark/blob/2057eb7e203c9fde3f4fa13d5f04225cf6e49a87/python/pyspark/sql/functions.py#L701
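For reference, a minimal sketch of exercising the existing `median` (assumes a live `SparkSession`; not part of this PR):

```python
from pyspark.sql import SparkSession
import pyspark.sql.functions as F

spark = SparkSession.builder.getOrCreate()

# median has been part of pyspark.sql.functions since 3.4.0,
# so the new tests here only need to cover percentile itself.
df = spark.createDataFrame([("a", 1.0), ("a", 2.0), ("b", 3.0)], ["key", "value"])
df.groupBy("key").agg(F.median("value").alias("median")).show()
```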
##########
sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala:
##########
@@ -1006,6 +1006,29 @@ class DataFrameAggregateSuite extends QueryTest
}
}
+ test("percentile_like") {
+ // percentile
+ checkAnswer(
+ courseSales.groupBy("course").agg(
+ percentile(col("year"), lit(0.3)),
+ percentile(col("year"), lit(Array(0.25, 0.75))),
+ percentile(col("year"), lit(0.3), lit(2)),
+ percentile(col("year"), lit(Array(0.25, 0.75)), lit(2))
+ ),
+ Row("Java", 2012.2999999999997, Seq(2012.25, 2012.75), 2012.0,
Seq(2012.0, 2013.0)) ::
+ Row("dotNET", 2012.0, Seq(2012.0, 2012.5), 2012.0, Seq(2012.0,
2012.75)) :: Nil
+ )
+
+ // median
Review Comment:
ditto
##########
python/pyspark/sql/functions.py:
##########
@@ -3253,6 +3253,72 @@ def nanvl(col1: "ColumnOrName", col2: "ColumnOrName") -> Column:
return _invoke_function_over_columns("nanvl", col1, col2)
+@try_remote_functions
+def percentile(
+ col: "ColumnOrName",
+ percentage: Union[Column, float, List[float], Tuple[float]],
+ frequency: Union[Column, int] = 1,
+) -> Column:
+ """Returns the exact percentile(s) of numeric column `expr` at the given
percentage(s)
+ with value range in [0.0, 1.0].
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ col : :class:`~pyspark.sql.Column` or str
+     input column.
+ percentage : :class:`~pyspark.sql.Column`, float, list of floats or tuple of floats
+     percentage in decimal (must be between 0.0 and 1.0).
+ frequency : :class:`~pyspark.sql.Column` or int
+     a positive numeric literal which controls frequency.
+
+ Returns
+ -------
+ :class:`~pyspark.sql.Column`
+ the exact `percentile` of the numeric column.
+
+ Examples
+ --------
+ >>> key = (col("id") % 3).alias("key")
+ >>> value = (randn(42) + key * 10).alias("value")
+ >>> df = spark.range(0, 1000, 1, 1).select(key, value)
+ >>> df.select(
+ ... percentile("value", [0.25, 0.5, 0.75], lit(1)).alias("quantiles")
+ ... ).printSchema()
+ root
+ |-- quantiles: array (nullable = true)
+ | |-- element: double (containsNull = false)
+
+ >>> df.groupBy("key").agg(
+ ... percentile("value", 0.5, lit(1)).alias("median")
+ ... ).printSchema()
Review Comment:
ditto
##########
python/pyspark/sql/functions.py:
##########
@@ -3253,6 +3253,72 @@ def nanvl(col1: "ColumnOrName", col2: "ColumnOrName") -> Column:
return _invoke_function_over_columns("nanvl", col1, col2)
+@try_remote_functions
+def percentile(
+ col: "ColumnOrName",
+ percentage: Union[Column, float, List[float], Tuple[float]],
+ frequency: Union[Column, int] = 1,
+) -> Column:
+ """Returns the exact percentile(s) of numeric column `expr` at the given
percentage(s)
+ with value range in [0.0, 1.0].
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ col : :class:`~pyspark.sql.Column` or str
+     input column.
+ percentage : :class:`~pyspark.sql.Column`, float, list of floats or tuple of floats
+     percentage in decimal (must be between 0.0 and 1.0).
+ frequency : :class:`~pyspark.sql.Column` or int
+     a positive numeric literal which controls frequency.
+
+ Returns
+ -------
+ :class:`~pyspark.sql.Column`
+ the exact `percentile` of the numeric column.
+
+ Examples
+ --------
+ >>> key = (col("id") % 3).alias("key")
+ >>> value = (randn(42) + key * 10).alias("value")
+ >>> df = spark.range(0, 1000, 1, 1).select(key, value)
+ >>> df.select(
+ ... percentile("value", [0.25, 0.5, 0.75], lit(1)).alias("quantiles")
+ ... ).printSchema()
Review Comment:
I think we need `show()` here to check the results?
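One way the doctest could surface the aggregated values rather than only the schema; a sketch, with the output skipped here rather than fabricated, since the exact doubles produced by `randn(42)` would need to be confirmed against a real run:

```python
>>> df.groupBy("key").agg(
...     percentile("value", 0.5, lit(1)).alias("median")
... ).sort("key").show()  # doctest: +SKIP
```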
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]