tomvanbussel commented on code in PR #36936:
URL: https://github.com/apache/spark/pull/36936#discussion_r908095046


##########
sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala:
##########
@@ -895,7 +905,7 @@ case class ShowTablesCommand(
       val normalizedSpec = PartitioningUtils.normalizePartitionSpec(
         partitionSpec.get,
         table.partitionSchema,
-        tableIdent.quotedString,
+        tableIdent.quotedString(SESSION_CATALOG_NAME),
         sparkSession.sessionState.conf.resolver)
       val partition = catalog.getPartition(tableIdent, normalizedSpec)
       val database = tableIdent.database.getOrElse("")

Review Comment:
   Do we also want to add the catalog name to the output of this function?



##########
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/identifiers.scala:
##########
@@ -33,15 +36,34 @@ sealed trait IdentifierWithDatabase {
    */
   private def quoteIdentifier(name: String): String = name.replace("`", "``")
 
-  def quotedString: String = {
-    val replacedId = quoteIdentifier(identifier)
-    val replacedDb = database.map(quoteIdentifier(_))
+  def quotedString: String = quotedString(None)
+  def quotedString(catalog: String): String = quotedString(Some(catalog))
 
-    if (replacedDb.isDefined) s"`${replacedDb.get}`.`$replacedId`" else 
s"`$replacedId`"
+  def quotedString(catalog: Option[String]): String = {

Review Comment:
   I don't think we should add these new methods. These methods make it easier 
to omit the catalog than to include it. I'm worried that this leads to most new 
code (and some existing code) not including the catalog. AFAIK it's always safe 
to include the catalog name if the database name is set (as in that case it 
won't be a local temporary view), so the following should work:
   ```scala
   def quotedString: String = {
     val replacedId = quoteIdentifier(identifier)
     val replacedDb = database.map(quoteIdentifier)
     if (database.isDefined) {
       if (SQLConf.get.getConf(LEGACY_NON_IDENTIFIER_OUTPUT_CATALOG_NAME)) {
         s"`${replacedDb.get}`.`$replacedId`"
       } else {
         s"`$SESSION_CATALOG_NAME`.`${replacedDb.get}`.`$replacedId`"
       }
     } else {
       s"`$replacedId`"
     }
  }
   ```



##########
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala:
##########
@@ -234,7 +235,7 @@ case class UnresolvedGenerator(name: FunctionIdentifier, 
children: Seq[Expressio
   override def nullable: Boolean = throw new UnresolvedException("nullable")
   override lazy val resolved = false
 
-  override def prettyName: String = name.unquotedString
+  override def prettyName: String = name.unquotedString(SESSION_CATALOG_NAME)
   override def toString: String = s"'$name(${children.mkString(", ")})"

Review Comment:
   This will include the unquoted identifier *without* the catalog, as the 
`toString` method of `FunctionIdentifier` is implemented as `unquotedString`. 
Is this intentional?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to