rdblue commented on a change in pull request #23848: [SPARK-26946][SQL] 
Identifiers for multi-catalog
URL: https://github.com/apache/spark/pull/23848#discussion_r266551226
 
 

 ##########
 File path: 
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
 ##########
 @@ -95,13 +96,40 @@ object AnalysisContext {
 class Analyzer(
     catalog: SessionCatalog,
     conf: SQLConf,
-    maxIterations: Int)
+    maxIterations: Int,
+    lookupCatalog: Option[(String) => CatalogPlugin] = None)
   extends RuleExecutor[LogicalPlan] with CheckAnalysis {
 
   def this(catalog: SessionCatalog, conf: SQLConf) = {
     this(catalog, conf, conf.optimizerMaxIterations)
   }
 
+  def this(lookupCatalog: Option[(String) => CatalogPlugin], catalog: 
SessionCatalog,
+      conf: SQLConf) = {
+    this(catalog, conf, conf.optimizerMaxIterations, lookupCatalog)
+  }
+
+  object CatalogRef {
+    def unapply(parts: Seq[String]): Option[(Option[CatalogPlugin], 
CatalogIdentifier)] =
 
 Review comment:
  This wasn't part of the other thread. I like using an extractor as a helper 
for writing rules; it will help us produce cleaner rules.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to