Repository: spark
Updated Branches:
  refs/heads/branch-1.4 f6a29c72c -> e79ecc7dc


[SQL] [TEST] udf_java_method failed due to jdk version

java.lang.Math.exp(1.0) returns different results on different JDK versions, so do not
use createQueryTest; write a separate test for it instead.
```
jdk version     result
1.7.0_11        2.7182818284590455
1.7.0_05        2.7182818284590455
1.7.0_71        2.718281828459045
```
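
For illustration, here is a minimal standalone Scala sketch (not part of this patch; the object name is made up) of why a hard-coded golden answer cannot be shared across JDKs, while an expected value recomputed on the same JVM, as the new SQLQuerySuite test does, always matches:

```
// Hypothetical demo object, not in the Spark codebase.
object ExpJdkSensitivity {
  def main(args: Array[String]): Unit = {
    // The value that java_method("java.lang.Math", "exp", 1.0) reflects back as a string.
    val observed = java.lang.Math.exp(1.0).toString
    println(s"java.lang.Math.exp(1.0) on this JVM: $observed")

    // A golden-file expectation, as createQueryTest would bake in
    // (matches on 1.7.0_11 / 1.7.0_05, but not on 1.7.0_71):
    val golden = "2.7182818284590455"
    println(s"matches hard-coded golden answer: ${observed == golden}")

    // The expectation recomputed at runtime, as the new test does: always matches.
    val recomputed = java.lang.Math.exp(1.0).toString
    println(s"matches value recomputed on this JVM: ${observed == recomputed}")
  }
}
```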

Author: scwf <[email protected]>

Closes #6274 from scwf/java_method and squashes the following commits:

3dd2516 [scwf] address comments
5fa1459 [scwf] style
df46445 [scwf] fix test error
fcb6d22 [scwf] fix udf_java_method

(cherry picked from commit f6c486aa4b0d3a50b53c110fd63d226fffeb87f7)
Signed-off-by: Michael Armbrust <[email protected]>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/e79ecc7d
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/e79ecc7d
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/e79ecc7d

Branch: refs/heads/branch-1.4
Commit: e79ecc7dc3664356dc2d805a0f43f81024ea59c5
Parents: f6a29c7
Author: scwf <[email protected]>
Authored: Thu May 21 12:31:58 2015 -0700
Committer: Michael Armbrust <[email protected]>
Committed: Thu May 21 12:32:10 2015 -0700

----------------------------------------------------------------------
 .../hive/execution/HiveCompatibilitySuite.scala |  6 +++--
 .../sql/hive/execution/HiveQuerySuite.scala     |  7 ++----
 .../sql/hive/execution/SQLQuerySuite.scala      | 23 ++++++++++++++++++++
 3 files changed, 29 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/e79ecc7d/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
index b6245a5..0b1917a 100644
--- a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
+++ b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
@@ -250,7 +250,10 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
 
     // The isolated classloader seemed to make some of our test reset mechanisms less robust.
     "combine1", // This test changes compression settings in a way that breaks all subsequent tests.
-    "load_dyn_part14.*" // These work alone but fail when run with other tests...
+    "load_dyn_part14.*", // These work alone but fail when run with other tests...
+
+    // the answer is sensitive for jdk version
+    "udf_java_method"
   ) ++ HiveShim.compatibilityBlackList
 
   /**
@@ -877,7 +880,6 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
     "udf_int",
     "udf_isnotnull",
     "udf_isnull",
-    "udf_java_method",
     "udf_lcase",
     "udf_length",
     "udf_lessthan",

http://git-wip-us.apache.org/repos/asf/spark/blob/e79ecc7d/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
index e7aec0b..65c6ef0 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
@@ -20,13 +20,10 @@ package org.apache.spark.sql.hive.execution
 import java.io.File
 import java.util.{Locale, TimeZone}
 
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
-import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspectorFactory, StructObjectInspector, ObjectInspector}
-import org.scalatest.BeforeAndAfter
-
 import scala.util.Try
 
+import org.scalatest.BeforeAndAfter
+
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars
 
 import org.apache.spark.{SparkFiles, SparkException}

http://git-wip-us.apache.org/repos/asf/spark/blob/e79ecc7d/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index fbbf6ba..ba53ed9 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -814,4 +814,27 @@ class SQLQuerySuite extends QueryTest {
       sql("SELECT cast(key+2 as Int) from df_analysis A group by cast(key+1 as 
int)")
     }
   }
+
+  // `Math.exp(1.0)` has different result for different jdk version, so not use createQueryTest
+  test("udf_java_method") {
+    checkAnswer(sql(
+      """
+        |SELECT java_method("java.lang.String", "valueOf", 1),
+        |       java_method("java.lang.String", "isEmpty"),
+        |       java_method("java.lang.Math", "max", 2, 3),
+        |       java_method("java.lang.Math", "min", 2, 3),
+        |       java_method("java.lang.Math", "round", 2.5),
+        |       java_method("java.lang.Math", "exp", 1.0),
+        |       java_method("java.lang.Math", "floor", 1.9)
+        |FROM src tablesample (1 rows)
+      """.stripMargin),
+      Row(
+        "1",
+        "true",
+        java.lang.Math.max(2, 3).toString,
+        java.lang.Math.min(2, 3).toString,
+        java.lang.Math.round(2.5).toString,
+        java.lang.Math.exp(1.0).toString,
+        java.lang.Math.floor(1.9).toString))
+  }
 }

