GitHub user rdblue commented on a diff in the pull request:

    https://github.com/apache/spark/pull/21978#discussion_r237585203
  
    --- Diff: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/identifiers.scala ---
    @@ -18,48 +18,106 @@
     package org.apache.spark.sql.catalyst
     
     /**
    - * An identifier that optionally specifies a database.
    + * An identifier that optionally specifies a database and catalog.
      *
      * Format (unquoted): "name" or "db.name"
      * Format (quoted): "`name`" or "`db`.`name`"
      */
    -sealed trait IdentifierWithDatabase {
    +sealed trait IdentifierWithOptionalDatabaseAndCatalog {
       val identifier: String
     
       def database: Option[String]
     
    +  def catalog: Option[String]
    +
       /*
        * Escapes back-ticks within the identifier name with double-back-ticks.
        */
       private def quoteIdentifier(name: String): String = name.replace("`", "``")
     
       def quotedString: String = {
    -    val replacedId = quoteIdentifier(identifier)
    -    val replacedDb = database.map(quoteIdentifier(_))
    -
    -    if (replacedDb.isDefined) s"`${replacedDb.get}`.`$replacedId`" else s"`$replacedId`"
    +    // database is required if catalog is present
    +    assert(database.isDefined || catalog.isEmpty)
    +    def q(s: String): String = s"`${quoteIdentifier(s)}`"
    +    Seq(catalog.map(q), database.map(q), Some(q(identifier))).flatten.mkString(".")
       }
     
       def unquotedString: String = {
    -    if (database.isDefined) s"${database.get}.$identifier" else identifier
    +    Seq(catalog, database, Some(identifier)).flatten.mkString(".")
       }
     
       override def toString: String = quotedString
     }
     
     
    +object CatalogTableIdentifier {
    +  def apply(table: String): CatalogTableIdentifier =
    +    new CatalogTableIdentifier(table, None, None)
    +
    +  def apply(table: String, database: String): CatalogTableIdentifier =
    +    new CatalogTableIdentifier(table, Some(database), None)
    +
    +  def apply(table: String, database: String, catalog: String): CatalogTableIdentifier =
    +    new CatalogTableIdentifier(table, Some(database), Some(catalog))
    +}
    +
     /**
    - * Identifies a table in a database.
    - * If `database` is not defined, the current database is used.
    - * When we register a permanent function in the FunctionRegistry, we use
    - * unquotedString as the function name.
    + * Identifies a table in a database and catalog.
    + * If `database` is not defined, the current catalog's default database is used.
    + * If `catalog` is not defined, the current catalog is used.
    --- End diff --
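
    For reference, a quick usage sketch of the new formatting methods, using the factories from the diff above (expected results shown in comments):

        // Parts render in catalog.database.name order; absent parts are skipped.
        val id = CatalogTableIdentifier("t", "db", "cat")
        id.quotedString                                 // `cat`.`db`.`t`
        id.unquotedString                               // cat.db.t
        CatalogTableIdentifier("t", "db").quotedString  // `db`.`t`
        CatalogTableIdentifier("t").quotedString        // `t`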
    
    No, we want to move away from a special global catalog. I think that Spark should have a current catalog, like the current database, which is used to resolve references that don't have an explicit catalog. The current catalog would have a default, just as the current database does.
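
    A minimal sketch of that resolution rule, assuming hypothetical `currentCatalog` and `currentDatabase` session accessors (neither exists in this PR):

        // Hypothetical, not part of this PR: fill in the missing parts of an
        // identifier from session state. Both session values would fall back
        // to defaults, just as the current database does today.
        def resolve(
            id: CatalogTableIdentifier,
            currentCatalog: String,
            currentDatabase: String): CatalogTableIdentifier =
          CatalogTableIdentifier(
            id.identifier,
            id.database.getOrElse(currentDatabase),
            id.catalog.getOrElse(currentCatalog))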

