GitHub user kunalkhamar commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16826#discussion_r103305709
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala ---
    @@ -90,110 +208,29 @@ private[sql] class SessionState(sparkSession: SparkSession) {
         }
       }
     
    -  /**
    -   * Internal catalog for managing table and database states.
    -   */
    -  lazy val catalog = new SessionCatalog(
    -    sparkSession.sharedState.externalCatalog,
    -    sparkSession.sharedState.globalTempViewManager,
    -    functionResourceLoader,
    -    functionRegistry,
    -    conf,
    -    newHadoopConf(),
    -    sqlParser)
    +  def newHadoopConf(copyHadoopConf: Configuration, sqlConf: SQLConf): Configuration = {
    +    val hadoopConf = new Configuration(copyHadoopConf)
    +    sqlConf.getAllConfs.foreach { case (k, v) => if (v ne null) hadoopConf.set(k, v) }
    +    hadoopConf
    +  }
     
    -  /**
    -   * Interface exposed to the user for registering user-defined functions.
    -   * Note that the user-defined functions must be deterministic.
    -   */
    -  lazy val udf: UDFRegistration = new UDFRegistration(functionRegistry)
    +  def createAnalyzer(
    +      sparkSession: SparkSession,
    +      catalog: SessionCatalog,
    +      sqlConf: SQLConf): Analyzer = {
     
    -  /**
    -   * Logical query plan analyzer for resolving unresolved attributes and relations.
    -   */
    -  lazy val analyzer: Analyzer = {
    -    new Analyzer(catalog, conf) {
    +    new Analyzer(catalog, sqlConf) {
           override val extendedResolutionRules =
             new FindDataSourceTable(sparkSession) ::
             new ResolveSQLOnFile(sparkSession) :: Nil
     
           override val postHocResolutionRules =
             PreprocessTableCreation(sparkSession) ::
    -        PreprocessTableInsertion(conf) ::
    -        DataSourceAnalysis(conf) :: Nil
    +        PreprocessTableInsertion(sqlConf) ::
    +        DataSourceAnalysis(sqlConf) :: Nil
     
           override val extendedCheckRules = Seq(PreWriteCheck, HiveOnlyCheck)
         }
       }
     
    -  /**
    -   * Logical query plan optimizer.
    -   */
    -  lazy val optimizer: Optimizer = new SparkOptimizer(catalog, conf, experimentalMethods)
    -
    -  /**
    -   * Parser that extracts expressions, plans, table identifiers etc. from SQL texts.
    -   */
    -  lazy val sqlParser: ParserInterface = new SparkSqlParser(conf)
    -
    -  /**
    -   * Planner that converts optimized logical plans to physical plans.
    -   */
    -  def planner: SparkPlanner =
    -    new SparkPlanner(sparkSession.sparkContext, conf, experimentalMethods.extraStrategies)
    -
    -  /**
    -   * An interface to register custom [[org.apache.spark.sql.util.QueryExecutionListener]]s
    -   * that listen for execution metrics.
    -   */
    -  lazy val listenerManager: ExecutionListenerManager = new ExecutionListenerManager
    -
    -  /**
    -   * Interface to start and stop [[StreamingQuery]]s.
    -   */
    -  lazy val streamingQueryManager: StreamingQueryManager = {
    -    new StreamingQueryManager(sparkSession)
    -  }
    -
    -  private val jarClassLoader: NonClosableMutableURLClassLoader =
    -    sparkSession.sharedState.jarClassLoader
    -
    -  // Automatically extract all entries and put it in our SQLConf
    -  // We need to call it after all of vals have been initialized.
    -  sparkSession.sparkContext.getConf.getAll.foreach { case (k, v) =>
    -    conf.setConfString(k, v)
    -  }
    -
    -  // ------------------------------------------------------
    -  //  Helper methods, partially leftover from pre-2.0 days
    -  // ------------------------------------------------------
    -
    -  def executePlan(plan: LogicalPlan): QueryExecution = new QueryExecution(sparkSession, plan)
    -
    -  def refreshTable(tableName: String): Unit = {
    -    catalog.refreshTable(sqlParser.parseTableIdentifier(tableName))
    -  }
    -
    -  def addJar(path: String): Unit = {
    -    sparkSession.sparkContext.addJar(path)
    -
    -    val uri = new Path(path).toUri
    -    val jarURL = if (uri.getScheme == null) {
    -      // `path` is a local file path without a URL scheme
    -      new File(path).toURI.toURL
    -    } else {
    -      // `path` is a URL with a scheme
    -      uri.toURL
    -    }
    -    jarClassLoader.addURL(jarURL)
    -    Thread.currentThread().setContextClassLoader(jarClassLoader)
    -  }
    -
    -  /**
    -   * Analyzes the given table in the current database to generate statistics, which will be
    -   * used in query optimizations.
    -   */
    -  def analyze(tableIdent: TableIdentifier, noscan: Boolean = true): Unit = {
    --- End diff --
    
    Cool.
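    
    For anyone skimming the hunk, here is a minimal standalone sketch of the copy-then-overlay pattern that the new newHadoopConf(copyHadoopConf, sqlConf) helper uses; the HadoopConfOverlay object, its overlay method, and the sample key are illustrative names only, not part of this PR:
    
        import org.apache.hadoop.conf.Configuration
    
        object HadoopConfOverlay {
          // Clone the base Hadoop configuration, then lay every non-null
          // entry on top of it, mirroring the helper added in the diff.
          def overlay(base: Configuration, sqlConfEntries: Map[String, String]): Configuration = {
            val hadoopConf = new Configuration(base)
            sqlConfEntries.foreach { case (k, v) => if (v ne null) hadoopConf.set(k, v) }
            hadoopConf
          }
    
          def main(args: Array[String]): Unit = {
            val base = new Configuration()
            val merged = overlay(base, Map("spark.sql.shuffle.partitions" -> "8"))
            // The overlaid entry is visible on the copy; the original stays untouched.
            println(merged.get("spark.sql.shuffle.partitions"))  // 8
            println(base.get("spark.sql.shuffle.partitions"))    // null
          }
        }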

