beliefer commented on a change in pull request #27507:
URL: https://github.com/apache/spark/pull/27507#discussion_r463060500



##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/regexpExpressions.scala
##########
@@ -518,3 +546,106 @@ case class RegExpExtract(subject: Expression, regexp: Expression, idx: Expression
     })
   }
 }
+
+/**
+ * Extract all occurrences of the specific (idx) group identified by a Java regex.
+ *
+ * NOTE: this expression is not THREAD-SAFE, as it has some internal mutable state.
+ */
+@ExpressionDescription(
+  usage = """
+    _FUNC_(str, regexp[, idx]) - Extract all strings in `str` that match the `regexp`
+    expression and correspond to the regex group index.
+  """,
+  arguments = """
+    Arguments:
+      * str - a string expression.
+      * regexp - a string representing a regular expression. The regex string should be a
+          Java regular expression.
+
+          Since Spark 2.0, string literals (including regex patterns) are unescaped in our SQL
+          parser. For example, to match "\abc", a regular expression for `regexp` can be
+          "^\\abc$".
+
+          There is a SQL config 'spark.sql.parser.escapedStringLiterals' that can be used to
+          fall back to the Spark 1.6 behavior regarding string literal parsing. For example,
+          if the config is enabled, the `regexp` that can match "\abc" is "^\abc$".
+      * idx - an integer expression representing the group index. The regex may contain
+          multiple groups. `idx` indicates which regex group to extract. The group index should
+          be non-negative. If `idx` is not specified, the default group index value is 1. The
+          `idx` parameter is the Java regex Matcher group() method index. See
+          docs/api/java/util/regex/Matcher.html for more information on the `idx` or Java regex
+          group() method.
+  """,
+  examples = """
+    Examples:
+      > SELECT _FUNC_('100-200, 300-400', '(\\d+)-(\\d+)', 1);
+       ["100","300"]
+  """,
+  since = "3.0.0")
+case class RegExpExtractAll(subject: Expression, regexp: Expression, idx: Expression)
+  extends RegExpExtractBase {
+  def this(s: Expression, r: Expression) = this(s, r, Literal(1))
+
+  override def nullSafeEval(s: Any, p: Any, r: Any): Any = {
+    val m = getLastMatcher(s, p)
+    val matchResults = new ArrayBuffer[UTF8String]()
+    while (m.find) {
+      val mr: MatchResult = m.toMatchResult
+      val index = r.asInstanceOf[Int]
+      RegExpExtractBase.checkGroupIndex(mr.groupCount, index)
+      val group = mr.group(index)
+      if (group == null) { // Pattern matched, but not optional group
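
To make the documented `idx` semantics concrete, here is a minimal standalone sketch using plain java.util.regex rather than the Spark expression itself (RegexGroupDemo and extractAll are illustrative names, not part of this PR). It collects group 1 from every match, reproducing the docstring example:

import java.util.regex.Pattern

import scala.collection.mutable.ArrayBuffer

object RegexGroupDemo {
  // Collect group `idx` from every match of `regex` in `str`.
  def extractAll(str: String, regex: String, idx: Int): List[String] = {
    val m = Pattern.compile(regex).matcher(str)
    val results = ArrayBuffer.empty[String]
    while (m.find()) {
      // group(0) is the whole match; 1..groupCount are the capture groups.
      results += m.group(idx)
    }
    results.toList
  }

  def main(args: Array[String]): Unit = {
    // Mirrors the docstring example: group 1 of (\d+)-(\d+) across two matches.
    println(extractAll("100-200, 300-400", "(\\d+)-(\\d+)", 1)) // List(100, 300)
  }
}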

Review comment:
       I just referenced https://github.com/apache/spark/blob/5250f988a32a8b5599f6038702da7e31dcbaccd8/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/regexpExpressions.scala#L497
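
For context on that reference: at the linked line, RegExpExtract appears to map a null result from Matcher.group(idx), i.e. the overall pattern matched but the optional capture group did not participate, to an empty string rather than NULL. A standalone sketch of that pattern in plain java.util.regex (OptionalGroupDemo is an illustrative name, not Spark code):

import java.util.regex.Pattern

object OptionalGroupDemo {
  def main(args: Array[String]): Unit = {
    // Group 2, (-\d+)?, is optional and absent in "100", so group(2) is null.
    val m = Pattern.compile("(\\d+)(-\\d+)?").matcher("100")
    if (m.find()) {
      val group = m.group(2)
      // Pattern matched, but the optional group did not participate: use "" instead of null.
      val result = if (group == null) "" else group
      println(s"group(2) = $group, result = '$result'") // group(2) = null, result = ''
    }
  }
}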



