jamisonbennett commented on a change in pull request #23398: [SPARK-26493][SQL] Allow multiple spark.sql.extensions
URL: https://github.com/apache/spark/pull/23398#discussion_r246395549
 
 

 ##########
 File path: sql/core/src/test/scala/org/apache/spark/sql/SparkSessionExtensionSuite.scala
 ##########
 @@ -108,12 +124,86 @@ class SparkSessionExtensionSuite extends SparkFunSuite {
     try {
       assert(session.sessionState.planner.strategies.contains(MySparkStrategy(session)))
       assert(session.sessionState.analyzer.extendedResolutionRules.contains(MyRule(session)))
+      assert(session.sessionState.analyzer.postHocResolutionRules.contains(MyRule(session)))
+      assert(session.sessionState.analyzer.extendedCheckRules.contains(MyCheckRule(session)))
+      assert(session.sessionState.optimizer.batches.flatMap(_.rules).contains(MyRule(session)))
+      assert(session.sessionState.sqlParser.isInstanceOf[MyParser])
+      assert(session.sessionState.functionRegistry
+        .lookupFunction(MyExtensions.myFunction._1).isDefined)
+    } finally {
+      stop(session)
+    }
+  }
+
+  test("use multiple custom class for extensions in the specified order") {
+    val session = SparkSession.builder()
+      .master("local[1]")
+      .config("spark.sql.extensions", Seq(
+        classOf[MyExtensions2].getCanonicalName,
+        classOf[MyExtensions].getCanonicalName).mkString(","))
+      .getOrCreate()
+    try {
+      assert(session.sessionState.planner.strategies.containsSlice(
+        Seq(MySparkStrategy2(session), MySparkStrategy(session))))
+      val orderedRules = Seq(MyRule2(session), MyRule(session))
+      val orderedCheckRules = Seq(MyCheckRule2(session), MyCheckRule(session))
+      val parser = MyParser(session, CatalystSqlParser)
+      assert(session.sessionState.analyzer.extendedResolutionRules.containsSlice(orderedRules))
+      assert(session.sessionState.analyzer.postHocResolutionRules.containsSlice(orderedRules))
+      assert(session.sessionState.analyzer.extendedCheckRules.containsSlice(orderedCheckRules))
+      assert(session.sessionState.optimizer.batches.flatMap(_.rules).filter(orderedRules.contains)
+        .containsSlice(orderedRules ++ orderedRules)) // The optimizer rules are duplicated
+      assert(session.sessionState.sqlParser == parser)
 
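For context (a minimal sketch, not part of the diff): each class named in `spark.sql.extensions` must implement `Function1[SparkSessionExtensions, Unit]`, which Spark instantiates reflectively and applies while building the session. A builder in the shape of the test's `MyExtensions2` might look like this, assuming `MyRule2` and `MySparkStrategy2` are the session-taking case classes from the suite above:

```scala
import org.apache.spark.sql.SparkSessionExtensions

// Sketch only: MyRule2 and MySparkStrategy2 are assumed to be the test
// suite's case classes, whose companion apply methods serve as the
// SparkSession => Rule[LogicalPlan] and SparkSession => Strategy builders.
class MyExtensions2 extends (SparkSessionExtensions => Unit) {
  override def apply(extensions: SparkSessionExtensions): Unit = {
    extensions.injectResolutionRule(MyRule2)           // analyzer resolution rule
    extensions.injectPlannerStrategy(MySparkStrategy2) // planner strategy
  }
}
```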
 Review comment:
   Based on [databricks/scala-style-guide#36](https://github.com/databricks/scala-style-guide/issues/36), it looks like `==` might now be preferred over `===`. For what it's worth, in the cases exercised by this test `==` produces reasonable error messages, such as `MyParser(org.apache.spark.sql.SparkSession@6e8a9c30,org.apache.spark.sql.catalyst.parser.CatalystSqlParser$@5d01ea21) did not equal IntentionalErrorThatIInsertedHere` and `2 did not equal 3`. So please let me know if there is newer guidance to use `===` and I can make the change.
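The failure messages quoted above come from ScalaTest's `assert` macro, which records both operands even with plain `==`. A self-contained sketch (the suite name here is made up; assumes ScalaTest 3.1+ for the `AnyFunSuite` import):

```scala
import org.scalatest.funsuite.AnyFunSuite

// Standalone illustration, not from the PR: the assert macro reports
// both sides of a plain == comparison when the assertion fails.
class EqualityMessageSuite extends AnyFunSuite {
  test("plain == yields a descriptive failure message") {
    val expected = 3
    val actual = 2
    // Fails with: "2 did not equal 3"
    assert(actual == expected)
  }
}
```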
