dtenedor commented on code in PR #48143:
URL: https://github.com/apache/spark/pull/48143#discussion_r1768815503
##########
python/pyspark/sql/functions/builtin.py:
##########
@@ -11861,6 +11861,44 @@ def regexp_like(str: "ColumnOrName", regexp:
"ColumnOrName") -> Column:
return _invoke_function_over_columns("regexp_like", str, regexp)
+@_try_remote_functions
+def randstr(length: Union[Column, int], seed: Optional[Union[Column, int]] =
None) -> Column:
+ """Returns a string of the specified length whose characters are chosen
uniformly at random from
+ the following pool of characters: 0-9, a-z, A-Z. The random seed is
optional. The string length
+ must be a constant two-byte or four-byte integer (SMALLINT or INT,
respectively).
+
+ .. versionadded:: 4.0.0
+
+ Parameters
+ ----------
+ length : :class:`~pyspark.sql.Column` or int
+ Number of characters in the string to generate.
+ seed : :class:`~pyspark.sql.Column` or int
+ Optional random number seed to use.
+
+ Returns
+ -------
+ :class:`~pyspark.sql.Column`
+ The generated random string with the specified length.
+
+ Examples
+ --------
+ >>> from pyspark.sql import functions as sf
+ >>> df = spark.createDataFrame([('3',)], ['a'])
+    >>> df.select(sf.randstr(sf.lit(5), sf.lit(0)).alias('result')).show()
+ +------+
+ |result|
+ +------+
+ | ceV0P|
+ +------+
+
Review Comment:
Sounds good, this is done.
##########
python/pyspark/sql/functions/builtin.py:
##########
@@ -12227,6 +12265,52 @@ def unhex(col: "ColumnOrName") -> Column:
return _invoke_function_over_columns("unhex", col)
+@_try_remote_functions
+def uniform(
+ min: Union[Column, int, float],
+ max: Union[Column, int, float],
+ seed: Optional[Union[Column, int]] = None,
+) -> Column:
+    """Returns a random value with independent and identically distributed
(i.i.d.) values within the
+ specified range of numbers. The random seed is optional. The provided
numbers specifying the
+ minimum and maximum values of the range must be constant. If both of these
numbers are integers,
+ then the result will also be an integer. Otherwise if one or both of these
are floating-point
+ numbers, then the result will also be a floating-point number.
+
+ .. versionadded:: 4.0.0
+
+ Parameters
+ ----------
+ min : :class:`~pyspark.sql.Column`, int, or float
+ Minimum value in the range.
+ max : :class:`~pyspark.sql.Column`, int, or float
+ Maximum value in the range.
+ seed : :class:`~pyspark.sql.Column` or int
+ Optional random number seed to use.
+
+ Returns
+ -------
+ :class:`~pyspark.sql.Column`
+ The generated random number within the specified range.
+
+ Examples
+ --------
+ >>> from pyspark.sql import functions as sf
+ >>> df = spark.createDataFrame([('3',)], ['a'])
+    >>> df.select(sf.uniform(sf.lit(0), sf.lit(10), sf.lit(0)).alias('result')).show()
+ +------+
+ |result|
+ +------+
+ | 7|
+ +------+
+
Review Comment:
Sounds good, this is done.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]