cloud-fan commented on a change in pull request #25077: [SPARK-28301][SQL] fix
the behavior of table name resolution with multi-catalog
URL: https://github.com/apache/spark/pull/25077#discussion_r301076229
##########
File path:
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
##########
@@ -752,29 +732,21 @@ class Analyzer(
// and the default database is only used to look up a view);
// 3. Use the currentDb of the SessionCatalog.
private def lookupTableFromCatalog(
- tableIdentifier: TableIdentifier,
+ ident: Seq[String],
u: UnresolvedRelation,
defaultDatabase: Option[String] = None): LogicalPlan = {
- val tableIdentWithDb = tableIdentifier.copy(
- database = tableIdentifier.database.orElse(defaultDatabase))
+ val identWithDb = if (ident.length == 1) {
+ defaultDatabase.toSeq ++ ident
+ } else {
+ ident
+ }
try {
- catalog.lookupRelation(tableIdentWithDb)
+ catalog.lookupRelation(identWithDb)
} catch {
case _: NoSuchTableException | _: NoSuchDatabaseException =>
u
}
}
-
- // If the database part is specified, and we support running SQL directly
on files, and
Review comment:
This is no longer needed because in
https://github.com/apache/spark/pull/24741 we delay the error reporting of
unresolved relations to `CheckAnalysis`
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]