zhengruifeng commented on code in PR #41505:
URL: https://github.com/apache/spark/pull/41505#discussion_r1222766865


##########
sql/core/src/main/scala/org/apache/spark/sql/functions.scala:
##########
@@ -5292,6 +5292,183 @@ object functions {
    */
   def hours(e: Column): Column = withExpr { Hours(e.expr) }
 
+  /**
+   * Creates a map after splitting the text into key/value pairs using delimiters.
+   * Both `pairDelim` and `keyValueDelim` are treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column, keyValueDelim: Column): Column = withExpr {
+    StringToMap(text.expr, pairDelim.expr, keyValueDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using delimiters.
+   * Default delimiter is ':' for `keyValueDelim`.
+   * The `pairDelim` is treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column): Column = withExpr {
+    new StringToMap(text.expr, pairDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using delimiters.
+   * Default delimiters are ',' for `pairDelim` and ':' for `keyValueDelim`.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column): Column = withExpr {
+    new StringToMap(text.expr)
+  }
+
+  /**
+   * Converts the input `str` to a binary value based on the supplied `fmt`.
+   * `fmt` can be a case-insensitive string literal of "hex", "utf-8", "utf8", or "base64".
+   * By default, the binary format for conversion is "hex" if `fmt` is omitted.
+   * The function returns NULL if at least one of the input parameters is NULL.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_binary(e: Column, f: Column): Column = withExpr {

Review Comment:
   ```suggestion
     def to_binary(e: Column, format: Column): Column = withExpr {
   ```
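   For reference, a minimal usage sketch of the `to_binary` overloads added in this hunk, assuming a `spark-shell` built from this branch (so `spark` and its implicits are in scope); the expected bytes follow the Python doctest quoted later in this thread:
   ```scala
   import org.apache.spark.sql.functions._
   import spark.implicits._

   val df = Seq(("abc", "616263")).toDF("s", "h")
   df.select(
     to_binary($"s", lit("utf-8")).as("utf8_bytes"), // "abc" -> bytes 61 62 63
     to_binary($"h").as("hex_bytes")                 // one-arg overload: format defaults to "hex"
   ).show(false)
   ```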
   



##########
sql/core/src/main/scala/org/apache/spark/sql/functions.scala:
##########
@@ -5292,6 +5292,183 @@ object functions {
    */
   def hours(e: Column): Column = withExpr { Hours(e.expr) }
 
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Both `pairDelim` and `keyValueDelim` are treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column, keyValueDelim: Column): 
Column = withExpr {
+    StringToMap(text.expr, pairDelim.expr, keyValueDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Default delimiter is ':' for `keyValueDelim`.
+   * The `pairDelim` is treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column): Column = withExpr {
+    new StringToMap(text.expr, pairDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Default delimiters are ',' for `pairDelim` and ':' for `keyValueDelim`.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column): Column = withExpr {
+    new StringToMap(text.expr)
+  }
+
+  /**
+   * Converts the input `str` to a binary value based on the supplied `fmt`.
+   * `fmt` can be a case-insensitive string literal of "hex", "utf-8", "utf8", 
or "base64".
+   * By default, the binary format for conversion is "hex" if `fmt` is omitted.
+   * The function returns NULL if at least one of the input parameters is NULL.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_binary(e: Column, f: Column): Column = withExpr {
+    new ToBinary(e.expr, f.expr)
+  }
+
+  /**
+   * Converts the input `str` to a binary value based on the format "hex".
+   * The function returns NULL if at least one of the input parameters is NULL.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_binary(e: Column): Column = withExpr {
+    new ToBinary(e.expr)
+  }
+
+  /**
+   * Convert `numberExpr` to a string based on the `formatExpr`.
+   * Throws an exception if the conversion fails. The format can consist of 
the following
+   * characters, case insensitive:
+   *   '0' or '9': Specifies an expected digit between 0 and 9. A sequence of 
0 or 9 in the format
+   *     string matches a sequence of digits in the input value, generating a 
result string of the
+   *     same length as the corresponding sequence in the format string. The 
result string is
+   *     left-padded with zeros if the 0/9 sequence comprises more digits than 
the matching part of
+   *     the decimal value, starts with 0, and is before the decimal point. 
Otherwise, it is
+   *     padded with spaces.
+   *   '.' or 'D': Specifies the position of the decimal point (optional, only 
allowed once).
+   *   ',' or 'G': Specifies the position of the grouping (thousands) 
separator (,). There must be
+   *     a 0 or 9 to the left and right of each grouping separator.
+   *   '$': Specifies the location of the $ currency sign. This character may 
only be specified
+   *     once.
+   *   'S' or 'MI': Specifies the position of a '-' or '+' sign (optional, 
only allowed once at
+   *     the beginning or end of the format string). Note that 'S' prints '+' 
for positive values
+   *     but 'MI' prints a space.
+   *   'PR': Only allowed at the end of the format string; specifies that the 
result string will be
+   *     wrapped by angle brackets if the input value is negative.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_char(left: Column, right: Column): Column = withExpr {
+    ToCharacter(left.expr, right.expr)
+  }
+
+  /**
+   * Convert string 'expr' to a number based on the string format 'fmt'.
+   * Throws an exception if the conversion fails. The format can consist of 
the following
+   * characters, case insensitive:
+   *   '0' or '9': Specifies an expected digit between 0 and 9. A sequence of 
0 or 9 in the format
+   *     string matches a sequence of digits in the input string. If the 0/9 
sequence starts with
+   *     0 and is before the decimal point, it can only match a digit sequence 
of the same size.
+   *     Otherwise, if the sequence starts with 9 or is after the decimal 
point, it can match a
+   *     digit sequence that has the same or smaller size.
+   *   '.' or 'D': Specifies the position of the decimal point (optional, only 
allowed once).
+   *   ',' or 'G': Specifies the position of the grouping (thousands) 
separator (,). There must be
+   *     a 0 or 9 to the left and right of each grouping separator. 'expr' 
must match the
+   *     grouping separator relevant for the size of the number.
+   *   '$': Specifies the location of the $ currency sign. This character may 
only be specified
+   *     once.
+   *   'S' or 'MI': Specifies the position of a '-' or '+' sign (optional, 
only allowed once at
+   *     the beginning or end of the format string). Note that 'S' allows '-' 
but 'MI' does not.
+   *   'PR': Only allowed at the end of the format string; specifies that 
'expr' indicates a
+   *     negative number with wrapping angled brackets.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_number(left: Column, right: Column): Column = withExpr {

Review Comment:
   ```suggestion
     def to_number(e: Column, format: Column): Column = withExpr {
   ```
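   As a rough illustration of the format rules described above (a sketch only, assuming a `spark-shell` built from this branch; the `to_char` half mirrors the Python doctest quoted later in this thread, and the `to_number` result is the value those rules imply):
   ```scala
   import org.apache.spark.sql.functions._
   import spark.implicits._

   val df = Seq((78.12, "$78.12")).toDF("n", "s")
   df.select(
     to_char($"n", lit("$99.99")).as("formatted"), // number -> "$78.12"
     to_number($"s", lit("$99.99")).as("parsed")   // "$78.12" -> 78.12 (decimal), per the rules above
   ).show(false)
   ```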



##########
sql/core/src/main/scala/org/apache/spark/sql/functions.scala:
##########
@@ -5292,6 +5292,183 @@ object functions {
    */
   def hours(e: Column): Column = withExpr { Hours(e.expr) }
 
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Both `pairDelim` and `keyValueDelim` are treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column, keyValueDelim: Column): 
Column = withExpr {
+    StringToMap(text.expr, pairDelim.expr, keyValueDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Default delimiter is ':' for `keyValueDelim`.
+   * The `pairDelim` is treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column): Column = withExpr {

Review Comment:
   `def str_to_map(text: Column, pairDelim: Column, keyValueDelim: Column): Column` is missing?
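   For reference, a minimal sketch of the three `str_to_map` overloads in this hunk, assuming a `spark-shell` built from this branch; the default delimiters (',' and ':') are the ones documented above:
   ```scala
   import org.apache.spark.sql.functions._
   import spark.implicits._

   val df = Seq("a:1,b:2,c:3").toDF("text")
   df.select(
     str_to_map($"text").as("defaults"),                     // ',' between pairs, ':' between key and value
     str_to_map($"text", lit(",")).as("pair_delim_only"),    // key/value delimiter defaults to ':'
     str_to_map($"text", lit(","), lit(":")).as("explicit")  // both delimiters given; treated as regexes
   ).show(false)
   ```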



##########
sql/core/src/main/scala/org/apache/spark/sql/functions.scala:
##########
@@ -5292,6 +5292,183 @@ object functions {
    */
   def hours(e: Column): Column = withExpr { Hours(e.expr) }
 
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Both `pairDelim` and `keyValueDelim` are treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column, keyValueDelim: Column): 
Column = withExpr {
+    StringToMap(text.expr, pairDelim.expr, keyValueDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Default delimiter is ':' for `keyValueDelim`.
+   * The `pairDelim` is treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column): Column = withExpr {
+    new StringToMap(text.expr, pairDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Default delimiters are ',' for `pairDelim` and ':' for `keyValueDelim`.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column): Column = withExpr {
+    new StringToMap(text.expr)
+  }
+
+  /**
+   * Converts the input `str` to a binary value based on the supplied `fmt`.
+   * `fmt` can be a case-insensitive string literal of "hex", "utf-8", "utf8", 
or "base64".
+   * By default, the binary format for conversion is "hex" if `fmt` is omitted.
+   * The function returns NULL if at least one of the input parameters is NULL.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_binary(e: Column, f: Column): Column = withExpr {
+    new ToBinary(e.expr, f.expr)
+  }
+
+  /**
+   * Converts the input `str` to a binary value based on the format "hex".
+   * The function returns NULL if at least one of the input parameters is NULL.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_binary(e: Column): Column = withExpr {
+    new ToBinary(e.expr)
+  }
+
+  /**
+   * Convert `numberExpr` to a string based on the `formatExpr`.
+   * Throws an exception if the conversion fails. The format can consist of 
the following
+   * characters, case insensitive:
+   *   '0' or '9': Specifies an expected digit between 0 and 9. A sequence of 
0 or 9 in the format
+   *     string matches a sequence of digits in the input value, generating a 
result string of the
+   *     same length as the corresponding sequence in the format string. The 
result string is
+   *     left-padded with zeros if the 0/9 sequence comprises more digits than 
the matching part of
+   *     the decimal value, starts with 0, and is before the decimal point. 
Otherwise, it is
+   *     padded with spaces.
+   *   '.' or 'D': Specifies the position of the decimal point (optional, only 
allowed once).
+   *   ',' or 'G': Specifies the position of the grouping (thousands) 
separator (,). There must be
+   *     a 0 or 9 to the left and right of each grouping separator.
+   *   '$': Specifies the location of the $ currency sign. This character may 
only be specified
+   *     once.
+   *   'S' or 'MI': Specifies the position of a '-' or '+' sign (optional, 
only allowed once at
+   *     the beginning or end of the format string). Note that 'S' prints '+' 
for positive values
+   *     but 'MI' prints a space.
+   *   'PR': Only allowed at the end of the format string; specifies that the 
result string will be
+   *     wrapped by angle brackets if the input value is negative.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_char(left: Column, right: Column): Column = withExpr {
+    ToCharacter(left.expr, right.expr)
+  }
+
+  /**
+   * Convert string 'expr' to a number based on the string format 'fmt'.
+   * Throws an exception if the conversion fails. The format can consist of 
the following
+   * characters, case insensitive:
+   *   '0' or '9': Specifies an expected digit between 0 and 9. A sequence of 
0 or 9 in the format
+   *     string matches a sequence of digits in the input string. If the 0/9 
sequence starts with
+   *     0 and is before the decimal point, it can only match a digit sequence 
of the same size.
+   *     Otherwise, if the sequence starts with 9 or is after the decimal 
point, it can match a
+   *     digit sequence that has the same or smaller size.
+   *   '.' or 'D': Specifies the position of the decimal point (optional, only 
allowed once).
+   *   ',' or 'G': Specifies the position of the grouping (thousands) 
separator (,). There must be
+   *     a 0 or 9 to the left and right of each grouping separator. 'expr' 
must match the
+   *     grouping separator relevant for the size of the number.
+   *   '$': Specifies the location of the $ currency sign. This character may 
only be specified
+   *     once.
+   *   'S' or 'MI': Specifies the position of a '-' or '+' sign (optional, 
only allowed once at
+   *     the beginning or end of the format string). Note that 'S' allows '-' 
but 'MI' does not.
+   *   'PR': Only allowed at the end of the format string; specifies that 
'expr' indicates a
+   *     negative number with wrapping angled brackets.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_number(left: Column, right: Column): Column = withExpr {
+    ToNumber(left.expr, right.expr)
+  }
+
+  /**
+   * Parses the `timestamp_str` expression with the `fmt` expression
+   * to a timestamp without time zone. Returns null with invalid input.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_timestamp_ltz(timestamp_str: Column, fmt: Column): Column = withExpr {

Review Comment:
   ```suggestion
     def to_timestamp_ltz(timestamp_str: Column, format: Column): Column = withExpr {
   ```
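   A short usage sketch of the two parsers touched by this rename, assuming a `spark-shell` built from this branch; the session time zone matches the Python doctests quoted later in this thread:
   ```scala
   import org.apache.spark.sql.functions._
   import spark.implicits._

   spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
   val df = Seq("2016-04-08").toDF("e")
   df.select(
     to_timestamp_ltz($"e", lit("yyyy-MM-dd")).as("ltz"), // TimestampType, interpreted in the session time zone
     to_timestamp_ntz($"e", lit("yyyy-MM-dd")).as("ntz")  // TimestampNTZType
   ).show(false)
   ```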



##########
sql/core/src/main/scala/org/apache/spark/sql/functions.scala:
##########
@@ -5292,6 +5292,183 @@ object functions {
    */
   def hours(e: Column): Column = withExpr { Hours(e.expr) }
 
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Both `pairDelim` and `keyValueDelim` are treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column, keyValueDelim: Column): 
Column = withExpr {
+    StringToMap(text.expr, pairDelim.expr, keyValueDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Default delimiter is ':' for `keyValueDelim`.
+   * The `pairDelim` is treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column): Column = withExpr {
+    new StringToMap(text.expr, pairDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Default delimiters are ',' for `pairDelim` and ':' for `keyValueDelim`.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column): Column = withExpr {
+    new StringToMap(text.expr)
+  }
+
+  /**
+   * Converts the input `str` to a binary value based on the supplied `fmt`.
+   * `fmt` can be a case-insensitive string literal of "hex", "utf-8", "utf8", 
or "base64".
+   * By default, the binary format for conversion is "hex" if `fmt` is omitted.
+   * The function returns NULL if at least one of the input parameters is NULL.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_binary(e: Column, f: Column): Column = withExpr {
+    new ToBinary(e.expr, f.expr)
+  }
+
+  /**
+   * Converts the input `str` to a binary value based on the format "hex".
+   * The function returns NULL if at least one of the input parameters is NULL.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_binary(e: Column): Column = withExpr {
+    new ToBinary(e.expr)
+  }
+
+  /**
+   * Convert `numberExpr` to a string based on the `formatExpr`.
+   * Throws an exception if the conversion fails. The format can consist of 
the following
+   * characters, case insensitive:
+   *   '0' or '9': Specifies an expected digit between 0 and 9. A sequence of 
0 or 9 in the format
+   *     string matches a sequence of digits in the input value, generating a 
result string of the
+   *     same length as the corresponding sequence in the format string. The 
result string is
+   *     left-padded with zeros if the 0/9 sequence comprises more digits than 
the matching part of
+   *     the decimal value, starts with 0, and is before the decimal point. 
Otherwise, it is
+   *     padded with spaces.
+   *   '.' or 'D': Specifies the position of the decimal point (optional, only 
allowed once).
+   *   ',' or 'G': Specifies the position of the grouping (thousands) 
separator (,). There must be
+   *     a 0 or 9 to the left and right of each grouping separator.
+   *   '$': Specifies the location of the $ currency sign. This character may 
only be specified
+   *     once.
+   *   'S' or 'MI': Specifies the position of a '-' or '+' sign (optional, 
only allowed once at
+   *     the beginning or end of the format string). Note that 'S' prints '+' 
for positive values
+   *     but 'MI' prints a space.
+   *   'PR': Only allowed at the end of the format string; specifies that the 
result string will be
+   *     wrapped by angle brackets if the input value is negative.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_char(left: Column, right: Column): Column = withExpr {

Review Comment:
   ```suggestion
     def to_char(e: Column, format: Column): Column = withExpr {
   ```



##########
sql/core/src/main/scala/org/apache/spark/sql/functions.scala:
##########
@@ -5292,6 +5292,183 @@ object functions {
    */
   def hours(e: Column): Column = withExpr { Hours(e.expr) }
 
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Both `pairDelim` and `keyValueDelim` are treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column, keyValueDelim: Column): 
Column = withExpr {
+    StringToMap(text.expr, pairDelim.expr, keyValueDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Default delimiter is ':' for `keyValueDelim`.
+   * The `pairDelim` is treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column): Column = withExpr {
+    new StringToMap(text.expr, pairDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Default delimiters are ',' for `pairDelim` and ':' for `keyValueDelim`.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column): Column = withExpr {
+    new StringToMap(text.expr)
+  }
+
+  /**
+   * Converts the input `str` to a binary value based on the supplied `fmt`.
+   * `fmt` can be a case-insensitive string literal of "hex", "utf-8", "utf8", 
or "base64".
+   * By default, the binary format for conversion is "hex" if `fmt` is omitted.
+   * The function returns NULL if at least one of the input parameters is NULL.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_binary(e: Column, f: Column): Column = withExpr {
+    new ToBinary(e.expr, f.expr)
+  }
+
+  /**
+   * Converts the input `str` to a binary value based on the format "hex".
+   * The function returns NULL if at least one of the input parameters is NULL.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_binary(e: Column): Column = withExpr {
+    new ToBinary(e.expr)
+  }
+
+  /**
+   * Convert `numberExpr` to a string based on the `formatExpr`.
+   * Throws an exception if the conversion fails. The format can consist of 
the following
+   * characters, case insensitive:
+   *   '0' or '9': Specifies an expected digit between 0 and 9. A sequence of 
0 or 9 in the format
+   *     string matches a sequence of digits in the input value, generating a 
result string of the
+   *     same length as the corresponding sequence in the format string. The 
result string is
+   *     left-padded with zeros if the 0/9 sequence comprises more digits than 
the matching part of
+   *     the decimal value, starts with 0, and is before the decimal point. 
Otherwise, it is
+   *     padded with spaces.
+   *   '.' or 'D': Specifies the position of the decimal point (optional, only 
allowed once).
+   *   ',' or 'G': Specifies the position of the grouping (thousands) 
separator (,). There must be
+   *     a 0 or 9 to the left and right of each grouping separator.
+   *   '$': Specifies the location of the $ currency sign. This character may 
only be specified
+   *     once.
+   *   'S' or 'MI': Specifies the position of a '-' or '+' sign (optional, 
only allowed once at
+   *     the beginning or end of the format string). Note that 'S' prints '+' 
for positive values
+   *     but 'MI' prints a space.
+   *   'PR': Only allowed at the end of the format string; specifies that the 
result string will be
+   *     wrapped by angle brackets if the input value is negative.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_char(left: Column, right: Column): Column = withExpr {
+    ToCharacter(left.expr, right.expr)
+  }
+
+  /**
+   * Convert string 'expr' to a number based on the string format 'fmt'.
+   * Throws an exception if the conversion fails. The format can consist of 
the following
+   * characters, case insensitive:
+   *   '0' or '9': Specifies an expected digit between 0 and 9. A sequence of 
0 or 9 in the format
+   *     string matches a sequence of digits in the input string. If the 0/9 
sequence starts with
+   *     0 and is before the decimal point, it can only match a digit sequence 
of the same size.
+   *     Otherwise, if the sequence starts with 9 or is after the decimal 
point, it can match a
+   *     digit sequence that has the same or smaller size.
+   *   '.' or 'D': Specifies the position of the decimal point (optional, only 
allowed once).
+   *   ',' or 'G': Specifies the position of the grouping (thousands) 
separator (,). There must be
+   *     a 0 or 9 to the left and right of each grouping separator. 'expr' 
must match the
+   *     grouping separator relevant for the size of the number.
+   *   '$': Specifies the location of the $ currency sign. This character may 
only be specified
+   *     once.
+   *   'S' or 'MI': Specifies the position of a '-' or '+' sign (optional, 
only allowed once at
+   *     the beginning or end of the format string). Note that 'S' allows '-' 
but 'MI' does not.
+   *   'PR': Only allowed at the end of the format string; specifies that 
'expr' indicates a
+   *     negative number with wrapping angled brackets.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_number(left: Column, right: Column): Column = withExpr {
+    ToNumber(left.expr, right.expr)
+  }
+
+  /**
+   * Parses the `timestamp_str` expression with the `fmt` expression
+   * to a timestamp without time zone. Returns null with invalid input.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_timestamp_ltz(timestamp_str: Column, fmt: Column): Column = withExpr {
+    ParseToTimestamp(timestamp_str.expr, Some(fmt.expr), TimestampType)
+  }
+
+  /**
+   * Parses the `timestamp_str` expression with the default format to a 
timestamp without time zone.
+   * The default format follows casting rules to a timestamp. Returns null 
with invalid input.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_timestamp_ltz(timestamp_str: Column): Column = withExpr {
+    ParseToTimestamp(timestamp_str.expr, None, TimestampType)
+  }
+
+  /**
+   * Parses the `timestamp_str` expression with the `fmt` expression
+   * to a timestamp without time zone. Returns null with invalid input.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_timestamp_ntz(timestamp_str: Column, fmt: Column): Column = withExpr {
+    ParseToTimestamp(timestamp_str.expr, Some(fmt.expr), TimestampNTZType)
+  }
+
+  /**
+   * Parses the `timestamp_str` expression with the default format to a 
timestamp without time zone.
+   * The default format follows casting rules to a timestamp. Returns null 
with invalid input.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_timestamp_ntz(timestamp_str: Column): Column = withExpr {
+    ParseToTimestamp(timestamp_str.expr, None, TimestampNTZType)
+  }
+
+  /**
+   * Returns the UNIX timestamp of the given time.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_unix_timestamp(timeExp: Column, format: Column, timeZoneId: String): 
Column = withExpr {

Review Comment:
   ```suggestion
     def to_unix_timestamp(e: Column, format: Column, timeZoneId: String): Column = withExpr {
   ```
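   A minimal sketch of the two-argument overload, assuming a `spark-shell` built from this branch; the expected value (1460098800) comes from the Python doctest quoted later in this thread, with the same session time zone:
   ```scala
   import org.apache.spark.sql.functions._
   import spark.implicits._

   spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
   val df = Seq("2016-04-08").toDF("e")
   df.select(to_unix_timestamp($"e", lit("yyyy-MM-dd")).as("r")).show() // 1460098800
   ```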



##########
python/pyspark/sql/functions.py:
##########
@@ -5672,6 +5672,74 @@ def check_field(field: Union[Column, str], fieldName: str) -> None:
     return _invoke_function("session_window", time_col, gap_duration)
 
 
+def to_unix_timestamp(
+    col1: "ColumnOrName",
+    col2: "ColumnOrName",
+    timeZoneId: Optional[str] = None,
+) -> Column:
+    """
+    Returns the UNIX timestamp of the given time.
+
+    Examples
+    --------
+    >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
+    >>> df = spark.createDataFrame([("2016-04-08",)], ["e"])
+    >>> df.select(to_unix_timestamp(df.e, lit("yyyy-MM-dd")).alias('r')).collect()
+    [Row(r=1460098800)]
+    >>> spark.conf.unset("spark.sql.session.timeZone")
+    """
+    if timeZoneId is not None:
+        return _invoke_function(
+            "to_unix_timestamp", _to_java_column(col1), _to_java_column(col2), timeZoneId
+        )
+    else:
+        return _invoke_function("to_unix_timestamp", _to_java_column(col1), _to_java_column(col2))
+
+
+def to_timestamp_ltz(
+    col1: "ColumnOrName",
+    col2: Optional["ColumnOrName"] = None,

Review Comment:
   please don't use var names `col1` and `col2` if they have different meanings.
   



##########
sql/core/src/main/scala/org/apache/spark/sql/functions.scala:
##########
@@ -5292,6 +5292,183 @@ object functions {
    */
   def hours(e: Column): Column = withExpr { Hours(e.expr) }
 
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Both `pairDelim` and `keyValueDelim` are treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column, keyValueDelim: Column): 
Column = withExpr {
+    StringToMap(text.expr, pairDelim.expr, keyValueDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Default delimiter is ':' for `keyValueDelim`.
+   * The `pairDelim` is treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column): Column = withExpr {
+    new StringToMap(text.expr, pairDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Default delimiters are ',' for `pairDelim` and ':' for `keyValueDelim`.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column): Column = withExpr {
+    new StringToMap(text.expr)
+  }
+
+  /**
+   * Converts the input `str` to a binary value based on the supplied `fmt`.
+   * `fmt` can be a case-insensitive string literal of "hex", "utf-8", "utf8", 
or "base64".
+   * By default, the binary format for conversion is "hex" if `fmt` is omitted.
+   * The function returns NULL if at least one of the input parameters is NULL.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_binary(e: Column, f: Column): Column = withExpr {
+    new ToBinary(e.expr, f.expr)
+  }
+
+  /**
+   * Converts the input `str` to a binary value based on the format "hex".
+   * The function returns NULL if at least one of the input parameters is NULL.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_binary(e: Column): Column = withExpr {
+    new ToBinary(e.expr)
+  }
+
+  /**
+   * Convert `numberExpr` to a string based on the `formatExpr`.
+   * Throws an exception if the conversion fails. The format can consist of 
the following
+   * characters, case insensitive:
+   *   '0' or '9': Specifies an expected digit between 0 and 9. A sequence of 
0 or 9 in the format
+   *     string matches a sequence of digits in the input value, generating a 
result string of the
+   *     same length as the corresponding sequence in the format string. The 
result string is
+   *     left-padded with zeros if the 0/9 sequence comprises more digits than 
the matching part of
+   *     the decimal value, starts with 0, and is before the decimal point. 
Otherwise, it is
+   *     padded with spaces.
+   *   '.' or 'D': Specifies the position of the decimal point (optional, only 
allowed once).
+   *   ',' or 'G': Specifies the position of the grouping (thousands) 
separator (,). There must be
+   *     a 0 or 9 to the left and right of each grouping separator.
+   *   '$': Specifies the location of the $ currency sign. This character may 
only be specified
+   *     once.
+   *   'S' or 'MI': Specifies the position of a '-' or '+' sign (optional, 
only allowed once at
+   *     the beginning or end of the format string). Note that 'S' prints '+' 
for positive values
+   *     but 'MI' prints a space.
+   *   'PR': Only allowed at the end of the format string; specifies that the 
result string will be
+   *     wrapped by angle brackets if the input value is negative.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_char(left: Column, right: Column): Column = withExpr {
+    ToCharacter(left.expr, right.expr)
+  }
+
+  /**
+   * Convert string 'expr' to a number based on the string format 'fmt'.
+   * Throws an exception if the conversion fails. The format can consist of 
the following
+   * characters, case insensitive:
+   *   '0' or '9': Specifies an expected digit between 0 and 9. A sequence of 
0 or 9 in the format
+   *     string matches a sequence of digits in the input string. If the 0/9 
sequence starts with
+   *     0 and is before the decimal point, it can only match a digit sequence 
of the same size.
+   *     Otherwise, if the sequence starts with 9 or is after the decimal 
point, it can match a
+   *     digit sequence that has the same or smaller size.
+   *   '.' or 'D': Specifies the position of the decimal point (optional, only 
allowed once).
+   *   ',' or 'G': Specifies the position of the grouping (thousands) 
separator (,). There must be
+   *     a 0 or 9 to the left and right of each grouping separator. 'expr' 
must match the
+   *     grouping separator relevant for the size of the number.
+   *   '$': Specifies the location of the $ currency sign. This character may 
only be specified
+   *     once.
+   *   'S' or 'MI': Specifies the position of a '-' or '+' sign (optional, 
only allowed once at
+   *     the beginning or end of the format string). Note that 'S' allows '-' 
but 'MI' does not.
+   *   'PR': Only allowed at the end of the format string; specifies that 
'expr' indicates a
+   *     negative number with wrapping angled brackets.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_number(left: Column, right: Column): Column = withExpr {
+    ToNumber(left.expr, right.expr)
+  }
+
+  /**
+   * Parses the `timestamp_str` expression with the `fmt` expression
+   * to a timestamp without time zone. Returns null with invalid input.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_timestamp_ltz(timestamp_str: Column, fmt: Column): Column = withExpr {
+    ParseToTimestamp(timestamp_str.expr, Some(fmt.expr), TimestampType)
+  }
+
+  /**
+   * Parses the `timestamp_str` expression with the default format to a 
timestamp without time zone.
+   * The default format follows casting rules to a timestamp. Returns null 
with invalid input.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_timestamp_ltz(timestamp_str: Column): Column = withExpr {
+    ParseToTimestamp(timestamp_str.expr, None, TimestampType)
+  }
+
+  /**
+   * Parses the `timestamp_str` expression with the `fmt` expression
+   * to a timestamp without time zone. Returns null with invalid input.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_timestamp_ntz(timestamp_str: Column, fmt: Column): Column = withExpr {
+    ParseToTimestamp(timestamp_str.expr, Some(fmt.expr), TimestampNTZType)
+  }
+
+  /**
+   * Parses the `timestamp_str` expression with the default format to a 
timestamp without time zone.
+   * The default format follows casting rules to a timestamp. Returns null 
with invalid input.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_timestamp_ntz(timestamp_str: Column): Column = withExpr {
+    ParseToTimestamp(timestamp_str.expr, None, TimestampNTZType)
+  }
+
+  /**
+   * Returns the UNIX timestamp of the given time.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_unix_timestamp(timeExp: Column, format: Column, timeZoneId: String): 
Column = withExpr {
+    ToUnixTimestamp(timeExp.expr, format.expr, Some(timeZoneId))
+  }
+
+  /**
+   * Returns the UNIX timestamp of the given time.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_unix_timestamp(timeExp: Column, format: Column): Column = withExpr {

Review Comment:
   ```suggestion
     def to_unix_timestamp(e: Column, format: Column): Column = withExpr {
   ```



##########
python/pyspark/sql/functions.py:
##########
@@ -5672,6 +5672,74 @@ def check_field(field: Union[Column, str], fieldName: 
str) -> None:
     return _invoke_function("session_window", time_col, gap_duration)
 
 
+def to_unix_timestamp(
+    col1: "ColumnOrName",
+    col2: "ColumnOrName",
+    timeZoneId: Optional[str] = None,
+) -> Column:
+    """
+    Returns the UNIX timestamp of the given time.

Review Comment:
   should contain a `Parameters` section



##########
python/pyspark/sql/functions.py:
##########
@@ -7229,6 +7297,87 @@ def translate(srcCol: "ColumnOrName", matching: str, replace: str) -> Column:
     return _invoke_function("translate", _to_java_column(srcCol), matching, replace)
 
 
+@try_remote_functions
+def to_binary(expr: "ColumnOrName", format: Optional["ColumnOrName"]) -> Column:
+    """
+    Converts the input `str` to a binary value based on the supplied `fmt`.
+    `fmt` can be a case-insensitive string literal of "hex", "utf-8", "utf8", or "base64".
+    By default, the binary format for conversion is "hex" if `fmt` is omitted.
+    The function returns NULL if at least one of the input parameters is NULL.
+
+    Examples
+    --------
+    >>> df = spark.createDataFrame([("abc",)], ["e"])
+    >>> df.select(to_binary(df.e, lit("utf-8")).alias('r')).collect()
+    [Row(r=bytearray(b'abc'))]
+    """
+    if format is not None:
+        return _invoke_function_over_columns("to_binary", expr, format)
+    else:
+        return _invoke_function_over_columns("to_binary", expr)
+
+
+def to_char(left: "ColumnOrName", right: "ColumnOrName") -> Column:

Review Comment:
   ```suggestion
   def to_char(col: "ColumnOrName", format: "ColumnOrName") -> Column:
   ```



##########
python/pyspark/sql/functions.py:
##########
@@ -7229,6 +7297,87 @@ def translate(srcCol: "ColumnOrName", matching: str, 
replace: str) -> Column:
     return _invoke_function("translate", _to_java_column(srcCol), matching, 
replace)
 
 
+@try_remote_functions
+def to_binary(expr: "ColumnOrName", format: Optional["ColumnOrName"]) -> 
Column:

Review Comment:
   ```suggestion
   def to_binary(col: "ColumnOrName", format: Optional["ColumnOrName"]) -> Column:
   ```



##########
python/pyspark/sql/functions.py:
##########
@@ -7229,6 +7297,87 @@ def translate(srcCol: "ColumnOrName", matching: str, 
replace: str) -> Column:
     return _invoke_function("translate", _to_java_column(srcCol), matching, 
replace)
 
 
+@try_remote_functions
+def to_binary(expr: "ColumnOrName", format: Optional["ColumnOrName"]) -> 
Column:
+    """
+    Converts the input `str` to a binary value based on the supplied `fmt`.
+    `fmt` can be a case-insensitive string literal of "hex", "utf-8", "utf8", 
or "base64".
+    By default, the binary format for conversion is "hex" if `fmt` is omitted.
+    The function returns NULL if at least one of the input parameters is NULL.
+
+    Examples
+    --------
+    >>> df = spark.createDataFrame([("abc",)], ["e"])
+    >>> df.select(to_binary(df.e, lit("utf-8")).alias('r')).collect()
+    [Row(r=bytearray(b'abc'))]
+    """
+    if format is not None:
+        return _invoke_function_over_columns("to_binary", expr, format)
+    else:
+        return _invoke_function_over_columns("to_binary", expr)
+
+
+def to_char(left: "ColumnOrName", right: "ColumnOrName") -> Column:
+    """
+    Convert `numberExpr` to a string based on the `formatExpr`.
+    Throws an exception if the conversion fails. The format can consist of the 
following
+    characters, case insensitive:
+       '0' or '9': Specifies an expected digit between 0 and 9. A sequence of 
0 or 9 in the format
+         string matches a sequence of digits in the input value, generating a 
result string of the
+         same length as the corresponding sequence in the format string. The 
result string is
+         left-padded with zeros if the 0/9 sequence comprises more digits than 
the matching part of
+         the decimal value, starts with 0, and is before the decimal point. 
Otherwise, it is
+         padded with spaces.
+       '.' or 'D': Specifies the position of the decimal point (optional, only 
allowed once).
+       ',' or 'G': Specifies the position of the grouping (thousands) 
separator (,). There must be
+         a 0 or 9 to the left and right of each grouping separator.
+       '$': Specifies the location of the $ currency sign. This character may 
only be specified
+         once.
+       'S' or 'MI': Specifies the position of a '-' or '+' sign (optional, 
only allowed once at
+         the beginning or end of the format string). Note that 'S' prints '+' 
for positive values
+         but 'MI' prints a space.
+       'PR': Only allowed at the end of the format string; specifies that the 
result string will be
+         wrapped by angle brackets if the input value is negative.
+
+    Examples
+    --------
+    >>> df = spark.createDataFrame([(78.12,)], ["e"])
+    >>> df.select(to_char(df.e, lit("$99.99")).alias('r')).collect()
+    [Row(r='$78.12')]
+    """
+    return _invoke_function_over_columns("to_char", left, right)
+
+
+def to_number(left: "ColumnOrName", right: "ColumnOrName") -> Column:

Review Comment:
   ```suggestion
   def to_number(col: "ColumnOrName", format: "ColumnOrName") -> Column:
   ```



##########
sql/core/src/main/scala/org/apache/spark/sql/functions.scala:
##########
@@ -5292,6 +5292,183 @@ object functions {
    */
   def hours(e: Column): Column = withExpr { Hours(e.expr) }
 
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Both `pairDelim` and `keyValueDelim` are treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column, keyValueDelim: Column): 
Column = withExpr {
+    StringToMap(text.expr, pairDelim.expr, keyValueDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Default delimiter is ':' for `keyValueDelim`.
+   * The `pairDelim` is treated as regular expressions.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column, pairDelim: Column): Column = withExpr {
+    new StringToMap(text.expr, pairDelim.expr)
+  }
+
+  /**
+   * Creates a map after splitting the text into key/value pairs using 
delimiters.
+   * Default delimiters are ',' for `pairDelim` and ':' for `keyValueDelim`.
+   *
+   * @group map_funcs
+   * @since 3.5.0
+   */
+  def str_to_map(text: Column): Column = withExpr {
+    new StringToMap(text.expr)
+  }
+
+  /**
+   * Converts the input `str` to a binary value based on the supplied `fmt`.
+   * `fmt` can be a case-insensitive string literal of "hex", "utf-8", "utf8", 
or "base64".
+   * By default, the binary format for conversion is "hex" if `fmt` is omitted.
+   * The function returns NULL if at least one of the input parameters is NULL.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_binary(e: Column, f: Column): Column = withExpr {
+    new ToBinary(e.expr, f.expr)
+  }
+
+  /**
+   * Converts the input `str` to a binary value based on the format "hex".
+   * The function returns NULL if at least one of the input parameters is NULL.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_binary(e: Column): Column = withExpr {
+    new ToBinary(e.expr)
+  }
+
+  /**
+   * Convert `numberExpr` to a string based on the `formatExpr`.
+   * Throws an exception if the conversion fails. The format can consist of 
the following
+   * characters, case insensitive:
+   *   '0' or '9': Specifies an expected digit between 0 and 9. A sequence of 
0 or 9 in the format
+   *     string matches a sequence of digits in the input value, generating a 
result string of the
+   *     same length as the corresponding sequence in the format string. The 
result string is
+   *     left-padded with zeros if the 0/9 sequence comprises more digits than 
the matching part of
+   *     the decimal value, starts with 0, and is before the decimal point. 
Otherwise, it is
+   *     padded with spaces.
+   *   '.' or 'D': Specifies the position of the decimal point (optional, only 
allowed once).
+   *   ',' or 'G': Specifies the position of the grouping (thousands) 
separator (,). There must be
+   *     a 0 or 9 to the left and right of each grouping separator.
+   *   '$': Specifies the location of the $ currency sign. This character may 
only be specified
+   *     once.
+   *   'S' or 'MI': Specifies the position of a '-' or '+' sign (optional, 
only allowed once at
+   *     the beginning or end of the format string). Note that 'S' prints '+' 
for positive values
+   *     but 'MI' prints a space.
+   *   'PR': Only allowed at the end of the format string; specifies that the 
result string will be
+   *     wrapped by angle brackets if the input value is negative.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_char(left: Column, right: Column): Column = withExpr {
+    ToCharacter(left.expr, right.expr)
+  }
+
+  /**
+   * Convert string 'expr' to a number based on the string format 'fmt'.
+   * Throws an exception if the conversion fails. The format can consist of 
the following
+   * characters, case insensitive:
+   *   '0' or '9': Specifies an expected digit between 0 and 9. A sequence of 
0 or 9 in the format
+   *     string matches a sequence of digits in the input string. If the 0/9 
sequence starts with
+   *     0 and is before the decimal point, it can only match a digit sequence 
of the same size.
+   *     Otherwise, if the sequence starts with 9 or is after the decimal 
point, it can match a
+   *     digit sequence that has the same or smaller size.
+   *   '.' or 'D': Specifies the position of the decimal point (optional, only 
allowed once).
+   *   ',' or 'G': Specifies the position of the grouping (thousands) 
separator (,). There must be
+   *     a 0 or 9 to the left and right of each grouping separator. 'expr' 
must match the
+   *     grouping separator relevant for the size of the number.
+   *   '$': Specifies the location of the $ currency sign. This character may 
only be specified
+   *     once.
+   *   'S' or 'MI': Specifies the position of a '-' or '+' sign (optional, 
only allowed once at
+   *     the beginning or end of the format string). Note that 'S' allows '-' 
but 'MI' does not.
+   *   'PR': Only allowed at the end of the format string; specifies that 
'expr' indicates a
+   *     negative number with wrapping angled brackets.
+   *
+   * @group string_funcs
+   * @since 3.5.0
+   */
+  def to_number(left: Column, right: Column): Column = withExpr {
+    ToNumber(left.expr, right.expr)
+  }
+
+  /**
+   * Parses the `timestamp_str` expression with the `fmt` expression
+   * to a timestamp without time zone. Returns null with invalid input.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_timestamp_ltz(timestamp_str: Column, fmt: Column): Column = withExpr {
+    ParseToTimestamp(timestamp_str.expr, Some(fmt.expr), TimestampType)
+  }
+
+  /**
+   * Parses the `timestamp_str` expression with the default format to a 
timestamp without time zone.
+   * The default format follows casting rules to a timestamp. Returns null 
with invalid input.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_timestamp_ltz(timestamp_str: Column): Column = withExpr {
+    ParseToTimestamp(timestamp_str.expr, None, TimestampType)
+  }
+
+  /**
+   * Parses the `timestamp_str` expression with the `fmt` expression
+   * to a timestamp without time zone. Returns null with invalid input.
+   *
+   * @group datetime_funcs
+   * @since 3.5.0
+   */
+  def to_timestamp_ntz(timestamp_str: Column, fmt: Column): Column = withExpr {

Review Comment:
   ```suggestion
     def to_timestamp_ntz(timestamp_str: Column, format: Column): Column = withExpr {
   ```



##########
python/pyspark/sql/functions.py:
##########
@@ -5672,6 +5672,74 @@ def check_field(field: Union[Column, str], fieldName: 
str) -> None:
     return _invoke_function("session_window", time_col, gap_duration)
 
 
+def to_unix_timestamp(
+    col1: "ColumnOrName",
+    col2: "ColumnOrName",

Review Comment:
   ```suggestion
   def to_unix_timestamp(
       col: "ColumnOrName",
       format: "ColumnOrName",
   ```



##########
python/pyspark/sql/functions.py:
##########
@@ -5672,6 +5672,74 @@ def check_field(field: Union[Column, str], fieldName: str) -> None:
     return _invoke_function("session_window", time_col, gap_duration)
 
 
+def to_unix_timestamp(
+    col1: "ColumnOrName",
+    col2: "ColumnOrName",
+    timeZoneId: Optional[str] = None,
+) -> Column:
+    """
+    Returns the UNIX timestamp of the given time.
+
+    Examples
+    --------
+    >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
+    >>> df = spark.createDataFrame([("2016-04-08",)], ["e"])
+    >>> df.select(to_unix_timestamp(df.e, lit("yyyy-MM-dd")).alias('r')).collect()
+    [Row(r=1460098800)]
+    >>> spark.conf.unset("spark.sql.session.timeZone")
+    """
+    if timeZoneId is not None:
+        return _invoke_function(
+            "to_unix_timestamp", _to_java_column(col1), _to_java_column(col2), timeZoneId
+        )
+    else:
+        return _invoke_function("to_unix_timestamp", _to_java_column(col1), _to_java_column(col2))
+
+
+def to_timestamp_ltz(
+    col1: "ColumnOrName",
+    col2: Optional["ColumnOrName"] = None,
+) -> Column:
+    """
+    Parses the `timestamp_str` expression with the `fmt` expression
+    to a timestamp without time zone. Returns null with invalid input.
+
+    Examples
+    --------
+    >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
+    >>> df = spark.createDataFrame([("2016-04-08",)], ["e"])
+    >>> df.select(to_timestamp_ltz(df.e, lit("yyyy-MM-dd")).alias('r')).collect()
+    [Row(r=datetime.datetime(2016, 4, 8, 15, 0))]
+    >>> spark.conf.unset("spark.sql.session.timeZone")
+    """
+    if col2 is not None:
+        return _invoke_function_over_columns("to_timestamp_ltz", col1, col2)
+    else:
+        return _invoke_function_over_columns("to_timestamp_ltz", col1)
+
+
+def to_timestamp_ntz(
+    col1: "ColumnOrName",
+    col2: Optional["ColumnOrName"] = None,

Review Comment:
   ditto



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
