This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 3748b38 [SPARK-27460][TESTS][FOLLOWUP] Add HiveClientVersions to parallel test suite list
3748b38 is described below
commit 3748b381df891ce4017e21b549588264ba1206a2
Author: Gengliang Wang <[email protected]>
AuthorDate: Thu Apr 18 15:37:55 2019 -0700
[SPARK-27460][TESTS][FOLLOWUP] Add HiveClientVersions to parallel test suite list
## What changes were proposed in this pull request?
The test time of `HiveClientVersions` is around 3.5 minutes. This PR adds it to the parallel test suite list. To make sure parallel runs do not collide on the warehouse location, the warehouse path is changed to a temporary directory.
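For context, the isolation fix boils down to pointing each test's Hive client at its own warehouse directory before the client is built. A minimal standalone sketch of the same idea follows; `WarehouseIsolationSketch` and the use of `java.nio.file.Files` are illustrative only, while the suite itself uses Spark's internal `Utils.createTempDir()`, as shown in the diff below.

```scala
import java.nio.file.Files
import org.apache.hadoop.conf.Configuration

// Sketch only: give each client its own warehouse so suites running in
// parallel never write to the same default warehouse location.
object WarehouseIsolationSketch {
  def main(args: Array[String]): Unit = {
    val hadoopConf = new Configuration()
    // A fresh temporary directory per run avoids colliding warehouse paths.
    val warehouse = Files.createTempDirectory("hive-warehouse-").toUri.toString
    hadoopConf.set("hive.metastore.warehouse.dir", warehouse)
    println(s"hive.metastore.warehouse.dir = $warehouse")
    // The Hive client built from this configuration (buildClient(hadoopConf)
    // in the actual suite) then creates its tables under this directory.
  }
}
```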
## How was this patch tested?
Unit test
Closes #24404 from gengliangwang/parallelTestFollowUp.
Authored-by: Gengliang Wang <[email protected]>
Signed-off-by: Dongjoon Hyun <[email protected]>
---
project/SparkBuild.scala | 1 +
.../test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala | 2 ++
2 files changed, 3 insertions(+)
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 656071d..f55f187 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -466,6 +466,7 @@ object SparkParallelTestGrouping {
"org.apache.spark.sql.hive.StatisticsSuite",
"org.apache.spark.sql.hive.execution.HiveCompatibilitySuite",
"org.apache.spark.sql.hive.client.VersionsSuite",
+ "org.apache.spark.sql.hive.client.HiveClientVersions",
"org.apache.spark.sql.hive.HiveExternalCatalogVersionsSuite",
"org.apache.spark.ml.classification.LogisticRegressionSuite",
"org.apache.spark.ml.classification.LinearSVCSuite",
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala
index f3d8c2a..bda7112 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala
@@ -29,6 +29,7 @@ import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types.{BooleanType, IntegerType, LongType, StructType}
+import org.apache.spark.util.Utils
// TODO: Refactor this to `HivePartitionFilteringSuite`
class HiveClientSuite(version: String)
@@ -49,6 +50,7 @@ class HiveClientSuite(version: String)
val hadoopConf = new Configuration()
hadoopConf.setBoolean(tryDirectSqlKey, tryDirectSql)
+ hadoopConf.set("hive.metastore.warehouse.dir",
Utils.createTempDir().toURI().toString())
val client = buildClient(hadoopConf)
val tableSchema =
new StructType().add("value", "int").add("ds", "int").add("h",
"int").add("chunk", "string")
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]