beliefer commented on code in PR #41561:
URL: https://github.com/apache/spark/pull/41561#discussion_r1230414050
##########
connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala:
##########
@@ -3495,6 +3495,151 @@ object functions {
*/
def to_number(e: Column, format: Column): Column = Column.fn("to_number", e,
format)
+ /**
+ * Returns the ASCII character having the binary equivalent to `n`. If n is
larger than 256 the
+ * result is equivalent to char(n % 256)
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def char(n: Column): Column = Column.fn("char", n)
+
+ /**
+ * Removes the leading and trailing space characters from `str`.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def btrim(str: Column): Column = Column.fn("btrim", str)
+
+ /**
+ * Remove the leading and trailing `trim` characters from `str`.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def btrim(str: Column, trim: Column): Column = Column.fn("btrim", str, trim)
+
+ /**
+ * Returns the character length of string data or number of bytes of binary
data. The length of
+ * string data includes the trailing spaces. The length of binary data
includes binary zeros.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def char_length(str: Column): Column = Column.fn("char_length", str)
+
+ /**
+ * Returns the character length of string data or number of bytes of binary
data. The length of
+ * string data includes the trailing spaces. The length of binary data
includes binary zeros.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def character_length(str: Column): Column = Column.fn("character_length",
str)
+
+ /**
+ * Returns the ASCII character having the binary equivalent to `n`. If n is
larger than 256 the
+ * result is equivalent to chr(n % 256)
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def chr(n: Column): Column = Column.fn("chr", n)
+
+ /**
+ * Returns a boolean. The value is True if right is found inside left.
Returns NULL if either
+ * input expression is NULL. Otherwise, returns False. Both left or right
must be of STRING or
+ * BINARY type.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def contains(left: Column, right: Column): Column = Column.fn("contains",
left, right)
+
+ /**
+ * Returns the `n`-th input, e.g., returns `input2` when `n` is 2. The
function returns NULL if
+ * the index exceeds the length of the array and `spark.sql.ansi.enabled` is
set to false. If
+ * `spark.sql.ansi.enabled` is set to true, it throws
ArrayIndexOutOfBoundsException for invalid
+ * indices.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ @scala.annotation.varargs
+ def elt(inputs: Column*): Column = Column.fn("elt", inputs: _*)
+
+ /**
+ * Returns the index (1-based) of the given string (`str`) in the
comma-delimited list
+ * (`strArray`). Returns 0, if the string was not found or if the given
string (`str`) contains
+ * a comma.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def find_in_set(str: Column, strArray: Column): Column =
Column.fn("find_in_set", str, strArray)
+
+ /**
+ * Returns true if str matches `pattern` with `escape`, null if any
arguments are null, false
+ * otherwise.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def like(str: Column, pattern: Column): Column = Column.fn("like", str,
pattern)
Review Comment:
Please add `def like(str: Column, pattern: Column, escapeChar: Column):
Column`.
##########
connector/connect/client/jvm/src/test/scala/org/apache/spark/sql/PlanGenerationTestSuite.scala:
##########
@@ -2136,6 +2136,70 @@ class PlanGenerationTestSuite
fn.to_unix_timestamp(fn.col("g"), fn.col("g"))
}
+ functionTest("char") {
+ fn.char(fn.col("a"))
+ }
+
+ functionTest("btrim") {
+ fn.btrim(fn.col("g"))
+ }
+
+ functionTest("btrim with trim") {
+ fn.btrim(fn.col("g"), fn.col("g"))
+ }
+
+ functionTest("char_length") {
+ fn.char_length(fn.col("g"))
+ }
+
+ functionTest("character_length") {
+ fn.character_length(fn.col("g"))
+ }
+
+ functionTest("chr") {
+ fn.chr(fn.col("a"))
+ }
+
+ functionTest("contains") {
+ fn.contains(fn.lit("Spark SQL"), fn.lit("Spark"))
+ }
+
+ functionTest("elt") {
+ fn.elt(fn.col("a"), fn.col("g"), fn.col("g"))
+ }
+
+ functionTest("find_in_set") {
+ fn.find_in_set(fn.col("g"), fn.col("g"))
+ }
+
+ functionTest("like") {
+ fn.like(fn.col("g"), fn.col("g"))
+ }
+
+ functionTest("ilike") {
Review Comment:
ditto.
##########
connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala:
##########
@@ -3495,6 +3495,151 @@ object functions {
*/
def to_number(e: Column, format: Column): Column = Column.fn("to_number", e,
format)
+ /**
+ * Returns the ASCII character having the binary equivalent to `n`. If n is
larger than 256 the
+ * result is equivalent to char(n % 256)
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def char(n: Column): Column = Column.fn("char", n)
+
+ /**
+ * Removes the leading and trailing space characters from `str`.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def btrim(str: Column): Column = Column.fn("btrim", str)
+
+ /**
+ * Remove the leading and trailing `trim` characters from `str`.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def btrim(str: Column, trim: Column): Column = Column.fn("btrim", str, trim)
+
+ /**
+ * Returns the character length of string data or number of bytes of binary
data. The length of
+ * string data includes the trailing spaces. The length of binary data
includes binary zeros.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def char_length(str: Column): Column = Column.fn("char_length", str)
+
+ /**
+ * Returns the character length of string data or number of bytes of binary
data. The length of
+ * string data includes the trailing spaces. The length of binary data
includes binary zeros.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def character_length(str: Column): Column = Column.fn("character_length",
str)
+
+ /**
+ * Returns the ASCII character having the binary equivalent to `n`. If n is
larger than 256 the
+ * result is equivalent to chr(n % 256)
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def chr(n: Column): Column = Column.fn("chr", n)
+
+ /**
+ * Returns a boolean. The value is True if right is found inside left.
Returns NULL if either
+ * input expression is NULL. Otherwise, returns False. Both left or right
must be of STRING or
+ * BINARY type.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def contains(left: Column, right: Column): Column = Column.fn("contains",
left, right)
+
+ /**
+ * Returns the `n`-th input, e.g., returns `input2` when `n` is 2. The
function returns NULL if
+ * the index exceeds the length of the array and `spark.sql.ansi.enabled` is
set to false. If
+ * `spark.sql.ansi.enabled` is set to true, it throws
ArrayIndexOutOfBoundsException for invalid
+ * indices.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ @scala.annotation.varargs
+ def elt(inputs: Column*): Column = Column.fn("elt", inputs: _*)
+
+ /**
+ * Returns the index (1-based) of the given string (`str`) in the
comma-delimited list
+ * (`strArray`). Returns 0, if the string was not found or if the given
string (`str`) contains
+ * a comma.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def find_in_set(str: Column, strArray: Column): Column =
Column.fn("find_in_set", str, strArray)
+
+ /**
+ * Returns true if str matches `pattern` with `escape`, null if any
arguments are null, false
+ * otherwise.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def like(str: Column, pattern: Column): Column = Column.fn("like", str,
pattern)
+
+ /**
+ * Returns true if str matches `pattern` with `escape` case-insensitively,
null if any arguments
+ * are null, false otherwise.
+ *
+ * @group string_funcs
+ * @since 3.5.0
+ */
+ def ilike(str: Column, pattern: Column): Column = Column.fn("ilike", str,
pattern)
Review Comment:
ditto.
##########
connector/connect/client/jvm/src/test/scala/org/apache/spark/sql/PlanGenerationTestSuite.scala:
##########
@@ -2136,6 +2136,70 @@ class PlanGenerationTestSuite
fn.to_unix_timestamp(fn.col("g"), fn.col("g"))
}
+ functionTest("char") {
+ fn.char(fn.col("a"))
+ }
+
+ functionTest("btrim") {
+ fn.btrim(fn.col("g"))
+ }
+
+ functionTest("btrim with trim") {
+ fn.btrim(fn.col("g"), fn.col("g"))
+ }
+
+ functionTest("char_length") {
+ fn.char_length(fn.col("g"))
+ }
+
+ functionTest("character_length") {
+ fn.character_length(fn.col("g"))
+ }
+
+ functionTest("chr") {
+ fn.chr(fn.col("a"))
+ }
+
+ functionTest("contains") {
+ fn.contains(fn.lit("Spark SQL"), fn.lit("Spark"))
+ }
+
+ functionTest("elt") {
+ fn.elt(fn.col("a"), fn.col("g"), fn.col("g"))
+ }
+
+ functionTest("find_in_set") {
+ fn.find_in_set(fn.col("g"), fn.col("g"))
+ }
+
+ functionTest("like") {
Review Comment:
Please add test cases with an escape character.
##########
connector/connect/client/jvm/src/test/scala/org/apache/spark/sql/PlanGenerationTestSuite.scala:
##########
@@ -2136,6 +2136,70 @@ class PlanGenerationTestSuite
fn.to_unix_timestamp(fn.col("g"), fn.col("g"))
}
+ functionTest("char") {
+ fn.char(fn.col("a"))
+ }
+
+ functionTest("btrim") {
+ fn.btrim(fn.col("g"))
+ }
+
+ functionTest("btrim with trim") {
Review Comment:
`btrim with specified trim string`
##########
python/pyspark/sql/functions.py:
##########
@@ -8159,6 +8159,382 @@ def to_number(col: "ColumnOrName", format:
"ColumnOrName") -> Column:
return _invoke_function_over_columns("to_number", col, format)
+@try_remote_functions
+def char(n: "ColumnOrName") -> Column:
+ """
+ Returns the ASCII character having the binary equivalent to `n`. If n is
larger than 256 the
+ result is equivalent to char(n % 256)
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ n : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([(65,)], ['a'])
+ >>> df.select(char(df.a).alias('r')).collect()
+ [Row(r='A')]
+ """
+ return _invoke_function_over_columns("char", n)
+
+
+@try_remote_functions
+def btrim(str: "ColumnOrName", trim: Optional["ColumnOrName"] = None) ->
Column:
+ """
+ Remove the leading and trailing `trim` characters from `str`.
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ str : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+ trim : :class:`~pyspark.sql.Column` or str
+ The trim string characters to trim, the default value is a single space
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([("SSparkSQLS", "SL", )], ['a', 'b'])
+ >>> df.select(btrim(df.a, df.b).alias('r')).collect()
+ [Row(r='parkSQ')]
+
+ >>> df = spark.createDataFrame([(" SparkSQL ",)], ['a'])
+ >>> df.select(btrim(df.a).alias('r')).collect()
+ [Row(r='SparkSQL')]
+ """
+ if trim is not None:
+ return _invoke_function_over_columns("btrim", str, trim)
+ else:
+ return _invoke_function_over_columns("btrim", str)
+
+
+@try_remote_functions
+def char_length(str: "ColumnOrName") -> Column:
+ """
+ Returns the character length of string data or number of bytes of binary
data.
+ The length of string data includes the trailing spaces.
+ The length of binary data includes binary zeros.
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ str : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([("SparkSQL",)], ['a'])
+ >>> df.select(char_length(df.a).alias('r')).collect()
+ [Row(r=8)]
+ """
+ return _invoke_function_over_columns("char_length", str)
+
+
+@try_remote_functions
+def character_length(str: "ColumnOrName") -> Column:
+ """
+ Returns the character length of string data or number of bytes of binary
data.
+ The length of string data includes the trailing spaces.
+ The length of binary data includes binary zeros.
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ str : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([("SparkSQL",)], ['a'])
+ >>> df.select(character_length(df.a).alias('r')).collect()
+ [Row(r=8)]
+ """
+ return _invoke_function_over_columns("character_length", str)
+
+
+@try_remote_functions
+def chr(n: "ColumnOrName") -> Column:
+ """
+ Returns the ASCII character having the binary equivalent to `n`.
+ If n is larger than 256 the result is equivalent to chr(n % 256)
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ n : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([(65,)], ['a'])
+ >>> df.select(chr(df.a).alias('r')).collect()
+ [Row(r='A')]
+ """
+ return _invoke_function_over_columns("chr", n)
+
+
+@try_remote_functions
+def contains(left: "ColumnOrName", right: "ColumnOrName") -> Column:
+ """
+ Returns a boolean. The value is True if right is found inside left.
+ Returns NULL if either input expression is NULL. Otherwise, returns False.
+ Both left or right must be of STRING or BINARY type.
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ left : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
Review Comment:
Shall we describe it in more detail?
##########
python/pyspark/sql/functions.py:
##########
@@ -8159,6 +8159,382 @@ def to_number(col: "ColumnOrName", format:
"ColumnOrName") -> Column:
return _invoke_function_over_columns("to_number", col, format)
+@try_remote_functions
+def char(n: "ColumnOrName") -> Column:
+ """
+ Returns the ASCII character having the binary equivalent to `n`. If n is
larger than 256 the
+ result is equivalent to char(n % 256)
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ n : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([(65,)], ['a'])
+ >>> df.select(char(df.a).alias('r')).collect()
+ [Row(r='A')]
+ """
+ return _invoke_function_over_columns("char", n)
+
+
+@try_remote_functions
+def btrim(str: "ColumnOrName", trim: Optional["ColumnOrName"] = None) ->
Column:
+ """
+ Remove the leading and trailing `trim` characters from `str`.
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ str : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+ trim : :class:`~pyspark.sql.Column` or str
+ The trim string characters to trim, the default value is a single space
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([("SSparkSQLS", "SL", )], ['a', 'b'])
+ >>> df.select(btrim(df.a, df.b).alias('r')).collect()
+ [Row(r='parkSQ')]
+
+ >>> df = spark.createDataFrame([(" SparkSQL ",)], ['a'])
+ >>> df.select(btrim(df.a).alias('r')).collect()
+ [Row(r='SparkSQL')]
+ """
+ if trim is not None:
+ return _invoke_function_over_columns("btrim", str, trim)
+ else:
+ return _invoke_function_over_columns("btrim", str)
+
+
+@try_remote_functions
+def char_length(str: "ColumnOrName") -> Column:
+ """
+ Returns the character length of string data or number of bytes of binary
data.
+ The length of string data includes the trailing spaces.
+ The length of binary data includes binary zeros.
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ str : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([("SparkSQL",)], ['a'])
+ >>> df.select(char_length(df.a).alias('r')).collect()
+ [Row(r=8)]
+ """
+ return _invoke_function_over_columns("char_length", str)
+
+
+@try_remote_functions
+def character_length(str: "ColumnOrName") -> Column:
+ """
+ Returns the character length of string data or number of bytes of binary
data.
+ The length of string data includes the trailing spaces.
+ The length of binary data includes binary zeros.
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ str : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([("SparkSQL",)], ['a'])
+ >>> df.select(character_length(df.a).alias('r')).collect()
+ [Row(r=8)]
+ """
+ return _invoke_function_over_columns("character_length", str)
+
+
+@try_remote_functions
+def chr(n: "ColumnOrName") -> Column:
+ """
+ Returns the ASCII character having the binary equivalent to `n`.
+ If n is larger than 256 the result is equivalent to chr(n % 256)
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ n : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([(65,)], ['a'])
+ >>> df.select(chr(df.a).alias('r')).collect()
+ [Row(r='A')]
+ """
+ return _invoke_function_over_columns("chr", n)
+
+
+@try_remote_functions
+def contains(left: "ColumnOrName", right: "ColumnOrName") -> Column:
+ """
+ Returns a boolean. The value is True if right is found inside left.
+ Returns NULL if either input expression is NULL. Otherwise, returns False.
+ Both left or right must be of STRING or BINARY type.
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ left : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+ right : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
Review Comment:
ditto.
##########
python/pyspark/sql/functions.py:
##########
@@ -8159,6 +8159,382 @@ def to_number(col: "ColumnOrName", format:
"ColumnOrName") -> Column:
return _invoke_function_over_columns("to_number", col, format)
+@try_remote_functions
+def char(n: "ColumnOrName") -> Column:
+ """
+ Returns the ASCII character having the binary equivalent to `n`. If n is
larger than 256 the
+ result is equivalent to char(n % 256)
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ n : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([(65,)], ['a'])
+ >>> df.select(char(df.a).alias('r')).collect()
+ [Row(r='A')]
+ """
+ return _invoke_function_over_columns("char", n)
+
+
+@try_remote_functions
+def btrim(str: "ColumnOrName", trim: Optional["ColumnOrName"] = None) ->
Column:
+ """
+ Remove the leading and trailing `trim` characters from `str`.
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ str : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+ trim : :class:`~pyspark.sql.Column` or str
+ The trim string characters to trim, the default value is a single space
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([("SSparkSQLS", "SL", )], ['a', 'b'])
+ >>> df.select(btrim(df.a, df.b).alias('r')).collect()
+ [Row(r='parkSQ')]
+
+ >>> df = spark.createDataFrame([(" SparkSQL ",)], ['a'])
+ >>> df.select(btrim(df.a).alias('r')).collect()
+ [Row(r='SparkSQL')]
+ """
+ if trim is not None:
+ return _invoke_function_over_columns("btrim", str, trim)
+ else:
+ return _invoke_function_over_columns("btrim", str)
+
+
+@try_remote_functions
+def char_length(str: "ColumnOrName") -> Column:
+ """
+ Returns the character length of string data or number of bytes of binary
data.
+ The length of string data includes the trailing spaces.
+ The length of binary data includes binary zeros.
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ str : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([("SparkSQL",)], ['a'])
+ >>> df.select(char_length(df.a).alias('r')).collect()
+ [Row(r=8)]
+ """
+ return _invoke_function_over_columns("char_length", str)
+
+
+@try_remote_functions
+def character_length(str: "ColumnOrName") -> Column:
+ """
+ Returns the character length of string data or number of bytes of binary
data.
+ The length of string data includes the trailing spaces.
+ The length of binary data includes binary zeros.
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ str : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([("SparkSQL",)], ['a'])
+ >>> df.select(character_length(df.a).alias('r')).collect()
+ [Row(r=8)]
+ """
+ return _invoke_function_over_columns("character_length", str)
+
+
+@try_remote_functions
+def chr(n: "ColumnOrName") -> Column:
+ """
+ Returns the ASCII character having the binary equivalent to `n`.
+ If n is larger than 256 the result is equivalent to chr(n % 256)
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ n : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([(65,)], ['a'])
+ >>> df.select(chr(df.a).alias('r')).collect()
+ [Row(r='A')]
+ """
+ return _invoke_function_over_columns("chr", n)
+
+
+@try_remote_functions
+def contains(left: "ColumnOrName", right: "ColumnOrName") -> Column:
+ """
+ Returns a boolean. The value is True if right is found inside left.
+ Returns NULL if either input expression is NULL. Otherwise, returns False.
+ Both left or right must be of STRING or BINARY type.
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ left : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+ right : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
+
+ Examples
+ --------
+ >>> df = spark.createDataFrame([("Spark SQL", "Spark")], ['a', 'b'])
+ >>> df.select(contains(df.a, df.b).alias('r')).collect()
+ [Row(r=True)]
+ """
+ return _invoke_function_over_columns("contains", left, right)
+
+
+@try_remote_functions
+def elt(*inputs: "ColumnOrName") -> Column:
+ """
+ Returns the `n`-th input, e.g., returns `input2` when `n` is 2.
+ The function returns NULL if the index exceeds the length of the array
+ and `spark.sql.ansi.enabled` is set to false. If `spark.sql.ansi.enabled`
is set to true,
+ it throws ArrayIndexOutOfBoundsException for invalid indices.
+
+ .. versionadded:: 3.5.0
+
+ Parameters
+ ----------
+ inputs : :class:`~pyspark.sql.Column` or str
+ Input column or strings.
Review Comment:
Input columns or strings.
##########
python/pyspark/sql/functions.py:
##########
@@ -8159,6 +8159,382 @@ def to_number(col: "ColumnOrName", format:
"ColumnOrName") -> Column:
return _invoke_function_over_columns("to_number", col, format)
+@try_remote_functions
+def char(n: "ColumnOrName") -> Column:
Review Comment:
+1
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]