maropu commented on a change in pull request #28802:
URL: https://github.com/apache/spark/pull/28802#discussion_r439206847
##########
File path: sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/TableIdentifierParserSuite.scala
##########
@@ -285,334 +290,86 @@ class TableIdentifierParserSuite extends SparkFunSuite with SQLHelper {
"where",
"with")
- // All the keywords in `docs/sql-keywords.md` are listed below:
- val allCandidateKeywords = Set(
- "add",
- "after",
- "all",
- "alter",
- "analyze",
- "and",
- "anti",
- "any",
- "archive",
- "array",
- "as",
- "asc",
- "at",
- "authorization",
- "between",
- "both",
- "bucket",
- "buckets",
- "by",
- "cache",
- "cascade",
- "case",
- "cast",
- "change",
- "check",
- "clear",
- "cluster",
- "clustered",
- "codegen",
- "collate",
- "collection",
- "column",
- "columns",
- "comment",
- "commit",
- "compact",
- "compactions",
- "compute",
- "concatenate",
- "constraint",
- "cost",
- "create",
- "cross",
- "cube",
- "current",
- "current_date",
- "current_time",
- "current_timestamp",
- "current_user",
- "data",
- "database",
- "databases",
- "day",
- "dbproperties",
- "defined",
- "delete",
- "delimited",
- "desc",
- "describe",
- "dfs",
- "directories",
- "directory",
- "distinct",
- "distribute",
- "div",
- "drop",
- "else",
- "end",
- "escape",
- "escaped",
- "except",
- "exchange",
- "exists",
- "explain",
- "export",
- "extended",
- "external",
- "extract",
- "false",
- "fetch",
- "fields",
- "fileformat",
- "first",
- "following",
- "for",
- "foreign",
- "format",
- "formatted",
- "from",
- "full",
- "function",
- "functions",
- "global",
- "grant",
- "group",
- "grouping",
- "having",
- "hour",
- "if",
- "ignore",
- "import",
- "in",
- "index",
- "indexes",
- "inner",
- "inpath",
- "inputformat",
- "insert",
- "intersect",
- "interval",
- "into",
- "is",
- "items",
- "join",
- "keys",
- "last",
- "lateral",
- "lazy",
- "leading",
- "left",
- "like",
- "limit",
- "lines",
- "list",
- "load",
- "local",
- "location",
- "lock",
- "locks",
- "logical",
- "macro",
- "map",
- "minus",
- "minute",
- "month",
- "msck",
- "namespaces",
- "natural",
- "no",
- "not",
- "null",
- "nulls",
- "of",
- "on",
- "only",
- "option",
- "options",
- "or",
- "order",
- "out",
- "outer",
- "outputformat",
- "over",
- "overlaps",
- "overlay",
- "overwrite",
- "partition",
- "partitioned",
- "partitions",
- "percent",
- "pivot",
- "placing",
- "position",
- "preceding",
- "primary",
- "principals",
- "purge",
- "query",
- "range",
- "recordreader",
- "recordwriter",
- "recover",
- "reduce",
- "references",
- "refresh",
- "rename",
- "repair",
- "replace",
- "reset",
- "restrict",
- "revoke",
- "right",
- "rlike",
- "role",
- "roles",
- "rollback",
- "rollup",
- "row",
- "rows",
- "schema",
- "second",
- "select",
- "semi",
- "separated",
- "serde",
- "serdeproperties",
- "session_user",
- "set",
- "sets",
- "show",
- "skewed",
- "some",
- "sort",
- "sorted",
- "start",
- "statistics",
- "stored",
- "stratify",
- "struct",
- "substr",
- "substring",
- "table",
- "tables",
- "tablesample",
- "tblproperties",
- "temporary",
- "terminated",
- "then",
- "to",
- "touch",
- "trailing",
- "transaction",
- "transactions",
- "transform",
- "true",
- "truncate",
- "type",
- "unarchive",
- "unbounded",
- "uncache",
- "union",
- "unique",
- "unknown",
- "unlock",
- "unset",
- "use",
- "user",
- "using",
- "values",
- "view",
- "views",
- "when",
- "where",
- "window",
- "with",
- "year")
+  private val sqlSyntaxDefs = {
+    val sqlBasePath = {
+      val sparkHome = {
+        assert(sys.props.contains("spark.test.home") ||
+          sys.env.contains("SPARK_HOME"), "spark.test.home or SPARK_HOME is not set.")
+        sys.props.getOrElse("spark.test.home", sys.env("SPARK_HOME"))
+      }
+      java.nio.file.Paths.get(sparkHome, "sql", "catalyst", "src", "main", "antlr4", "org",
+        "apache", "spark", "sql", "catalyst", "parser", "SqlBase.g4").toFile
+    }
+    fileToString(sqlBasePath).split("\n")
+  }
- val reservedKeywordsInAnsiMode = Set(
- "all",
- "and",
- "anti",
- "any",
- "as",
- "authorization",
- "both",
- "case",
- "cast",
- "check",
- "collate",
- "column",
- "constraint",
- "create",
- "cross",
- "current_date",
- "current_time",
- "current_timestamp",
- "current_user",
- "day",
- "distinct",
- "else",
- "end",
- "escape",
- "except",
- "false",
- "fetch",
- "for",
- "foreign",
- "from",
- "full",
- "grant",
- "group",
- "having",
- "hour",
- "in",
- "inner",
- "intersect",
- "into",
- "join",
- "is",
- "leading",
- "left",
- "minute",
- "month",
- "natural",
- "not",
- "null",
- "on",
- "only",
- "or",
- "order",
- "outer",
- "overlaps",
- "primary",
- "references",
- "right",
- "select",
- "semi",
- "session_user",
- "minus",
- "second",
- "some",
- "table",
- "then",
- "to",
- "trailing",
- "union",
- "unique",
- "unknown",
- "user",
- "using",
- "when",
- "where",
- "with",
- "year")
+  private def parseAntlrGrammars(startTag: String, endTag: String)
+      (f: PartialFunction[String, Option[String]]): Set[String] = {
+    // We need to map a symbol string to actual literal strings
+    // in case they have different string forms.
+    val symbolsToNeedRemap = Map(
+      "DATABASES" -> Seq("DATABASES", "SCHEMAS"),
+      "RLIKE" -> Seq("RLIKE", "REGEXP"),
+      "SETMINUS" -> Seq("MINUS"),
+      "TEMPORARY" -> Seq("TEMPORARY", "TEMP")
+    )
+    val keywords = new mutable.ArrayBuffer[String]
+    val default = (_: String) => None
+    var startTagFound = false
+    var parseFinished = false
+    val lineIter = sqlSyntaxDefs.toIterator
+    while (!parseFinished && lineIter.hasNext) {
+      val line = lineIter.next()
+      if (line.trim.startsWith(startTag)) {
+        startTagFound = true
+      } else if (line.trim.startsWith(endTag)) {
+        parseFinished = true
+      } else if (startTagFound) {
+        f.applyOrElse(line, default).foreach { symbol =>
+          if (symbolsToNeedRemap.contains(symbol)) {
+            keywords ++= symbolsToNeedRemap(symbol)
+          } else {
+            keywords += symbol
+          }
+        }
+      }
+    }
+    assert(keywords.nonEmpty && startTagFound && parseFinished, "cannot extract keywords from " +
+      s"the `SqlBase.g4` file, so please check if the start/end tags (`$startTag` and `$endTag`) " +
+      "are placed correctly in the file.")
+    keywords.map(_.trim.toLowerCase(Locale.ROOT)).toSet
+  }
-  val nonReservedKeywordsInAnsiMode = allCandidateKeywords -- reservedKeywordsInAnsiMode
+  private def parseSyntax(startTag: String, endTag: String): Set[String] = {
+    val kwDef = """\s*[\|:]\s*([A-Z_]+)\s*""".r
+    parseAntlrGrammars(startTag, endTag) {
+      // Parses a pattern, e.g., ` | AFTER`
+      case kwDef(symbol) => Some(symbol)
+    }
+  }
+
+  // All the SQL keywords defined in `SqlBase.g4`
+  val allCandidateKeywords = {
+    val kwDef = """([A-Z_]+):.+;""".r
+    val keywords = parseAntlrGrammars(
+      "//--SPARK-KEYWORD-LIST-START", "//--SPARK-KEYWORD-LIST-END") {
+      // Parses a pattern, e.g., `AFTER: 'AFTER';`
Review comment:
Two. As for the pattern, the current PR code expands "KEYWORD" into two keywords in https://github.com/apache/spark/pull/28802/files#diff-7e16f8441ff7d93ab9a9cf8e23c3df4cR310-R315:
```
val symbolsToNeedRemap = Map(
  "DATABASES" -> Seq("DATABASES", "SCHEMAS"),
  "RLIKE" -> Seq("RLIKE", "REGEXP"),
  "SETMINUS" -> Seq("MINUS"),
  "TEMPORARY" -> Seq("TEMPORARY", "TEMP"),
  "KEYWORD" -> Seq("ABC", "XYZ")  // <-- we need to expand it like this
)
```
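
For anyone skimming the thread, here's a minimal runnable sketch of that expansion (the object name `KeywordRemapDemo`, the `expand` helper, and the sample input are made up for illustration; only the remap table and the shape of the `kwDef` regex come from the diff above):
```
import java.util.Locale

import scala.collection.mutable

object KeywordRemapDemo {
  // A single ANTLR symbol can stand for several SQL keywords.
  val symbolsToNeedRemap = Map(
    "DATABASES" -> Seq("DATABASES", "SCHEMAS"),
    "RLIKE" -> Seq("RLIKE", "REGEXP"),
    "SETMINUS" -> Seq("MINUS"),
    "TEMPORARY" -> Seq("TEMPORARY", "TEMP")
  )

  // Matches grammar lines such as `    | AFTER` and captures the symbol.
  val kwDef = """\s*[\|:]\s*([A-Z_]+)\s*""".r

  def expand(lines: Seq[String]): Set[String] = {
    val keywords = new mutable.ArrayBuffer[String]
    lines.foreach {
      case kwDef(symbol) =>
        // Fan the symbol out into all the literal keywords it represents.
        keywords ++= symbolsToNeedRemap.getOrElse(symbol, Seq(symbol))
      case _ => // skip lines that do not define a keyword
    }
    keywords.map(_.toLowerCase(Locale.ROOT)).toSet
  }

  def main(args: Array[String]): Unit = {
    // Yields Set(databases, schemas, after, minus) (element order may vary):
    // DATABASES fans out into two keywords, SETMINUS is remapped to MINUS.
    println(expand(Seq("    | DATABASES", "    | AFTER", "    | SETMINUS")))
  }
}
```
The point is just that one grammar symbol can fan out into multiple entries in the final keyword set, which is why a new `KEYWORD` symbol would need its own `Seq(...)` entry in the map.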
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]