This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new b40c11f  [SPARK-35687][SQL][TEST] PythonUDFSuite move assume into its methods
b40c11f is described below

commit b40c11fde78c34b2ee8d4d9087eff1ebffe045a0
Author: ulysses-you <ulyssesyo...@gmail.com>
AuthorDate: Wed Jun 9 15:57:56 2021 +0900

    [SPARK-35687][SQL][TEST] PythonUDFSuite move assume into its methods
    
    ### What changes were proposed in this pull request?
    
    Move `assume` into the test methods of `PythonUDFSuite`.
    
    ### Why are the changes needed?
    
    When we run the Spark tests with a command such as:
    `./build/mvn -Phadoop-2.7 -Phive -Phive-thriftserver -Pyarn -Pkubernetes clean test`
    
    we get this exception:
    ```
     PythonUDFSuite:
     org.apache.spark.sql.execution.python.PythonUDFSuite *** ABORTED ***
       java.lang.RuntimeException: Unable to load a Suite class that was discovered in the runpath: org.apache.spark.sql.execution.python.PythonUDFSuite
       at org.scalatest.tools.DiscoverySuite$.getSuiteInstance(DiscoverySuite.scala:81)
       at org.scalatest.tools.DiscoverySuite.$anonfun$nestedSuites$1(DiscoverySuite.scala:38)
       at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
       at scala.collection.Iterator.foreach(Iterator.scala:941)
       at scala.collection.Iterator.foreach$(Iterator.scala:941)
       at scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
       at scala.collection.IterableLike.foreach(IterableLike.scala:74)
       at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
       at scala.collection.AbstractIterable.foreach(Iterable.scala:56)
       at scala.collection.TraversableLike.map(TraversableLike.scala:238)
    ```
    
    The test environment has no PySpark module, so the suite itself failed to load.
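    
    To illustrate why the placement matters (this sketch is not part of the commit): in ScalaTest a failed `assume` throws `TestCanceledException`. In the class body that exception is raised while the suite is being instantiated, so the whole suite is reported as ABORTED; inside a test body it only cancels that single test. Below, `PlacementSketchSuite` and `pythonAvailable` are hypothetical stand-ins for `PythonUDFSuite` and `shouldTestPythonUDFs`:
    
    ```
    import org.scalatest.funsuite.AnyFunSuite
    
    // Sketch only: the suite name and the availability check are hypothetical.
    class PlacementSketchSuite extends AnyFunSuite {
      val pythonAvailable: Boolean = sys.env.contains("PYSPARK_PYTHON")
    
      // If `assume(pythonAvailable)` ran here, in the class body (the constructor),
      // a false condition would throw TestCanceledException while ScalaTest
      // instantiates the suite, and the whole suite would be reported as ABORTED.
    
      test("python-dependent behaviour") {
        // Inside the test body a false assumption only cancels this one test,
        // so the suite still loads and the remaining tests can run.
        assume(pythonAvailable)
        assert(1 + 1 == 2)
      }
    }
    ```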
    
    ### Does this PR introduce _any_ user-facing change?
    
    No
    
    ### How was this patch tested?
    
    Manually tested.
    
    Closes #32833 from ulysses-you/SPARK-35687.
    
    Authored-by: ulysses-you <ulyssesyo...@gmail.com>
    Signed-off-by: Hyukjin Kwon <gurwls...@apache.org>
    (cherry picked from commit 825b62086254ee5edeaf16fccf632674711b1bd8)
    Signed-off-by: Hyukjin Kwon <gurwls...@apache.org>
---
 .../scala/org/apache/spark/sql/execution/python/PythonUDFSuite.scala | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/python/PythonUDFSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/python/PythonUDFSuite.scala
index 8cf1b7f..45b5720 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/python/PythonUDFSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/python/PythonUDFSuite.scala
@@ -28,7 +28,6 @@ class PythonUDFSuite extends QueryTest with SharedSparkSession {
 
   val scalaTestUDF = TestScalaUDF(name = "scalaUDF")
   val pythonTestUDF = TestPythonUDF(name = "pyUDF")
-  assume(shouldTestPythonUDFs)
 
   lazy val base = Seq(
     (Some(1), Some(1)), (Some(1), Some(2)), (Some(2), Some(1)),
@@ -36,6 +35,7 @@ class PythonUDFSuite extends QueryTest with SharedSparkSession {
     (None, Some(1)), (Some(3), None), (None, None)).toDF("a", "b")
 
   test("SPARK-28445: PythonUDF as grouping key and aggregate expressions") {
+    assume(shouldTestPythonUDFs)
     val df1 = base.groupBy(scalaTestUDF(base("a") + 1))
       .agg(scalaTestUDF(base("a") + 1), scalaTestUDF(count(base("b"))))
     val df2 = base.groupBy(pythonTestUDF(base("a") + 1))
@@ -44,6 +44,7 @@ class PythonUDFSuite extends QueryTest with SharedSparkSession {
   }
 
   test("SPARK-28445: PythonUDF as grouping key and used in aggregate expressions") {
+    assume(shouldTestPythonUDFs)
     val df1 = base.groupBy(scalaTestUDF(base("a") + 1))
       .agg(scalaTestUDF(base("a") + 1) + 1, scalaTestUDF(count(base("b"))))
     val df2 = base.groupBy(pythonTestUDF(base("a") + 1))
@@ -52,6 +53,7 @@ class PythonUDFSuite extends QueryTest with SharedSparkSession {
   }
 
   test("SPARK-28445: PythonUDF in aggregate expression has grouping key in its arguments") {
+    assume(shouldTestPythonUDFs)
     val df1 = base.groupBy(scalaTestUDF(base("a") + 1))
       .agg(scalaTestUDF(scalaTestUDF(base("a") + 1)), scalaTestUDF(count(base("b"))))
     val df2 = base.groupBy(pythonTestUDF(base("a") + 1))
@@ -60,6 +62,7 @@ class PythonUDFSuite extends QueryTest with SharedSparkSession {
   }
 
   test("SPARK-28445: PythonUDF over grouping key is argument to aggregate function") {
+    assume(shouldTestPythonUDFs)
     val df1 = base.groupBy(scalaTestUDF(base("a") + 1))
       .agg(scalaTestUDF(scalaTestUDF(base("a") + 1)),
         scalaTestUDF(count(scalaTestUDF(base("a") + 1))))

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
