Repository: spark
Updated Branches:
  refs/heads/master 1c70b7650 -> 20ddf5fdd


http://git-wip-us.apache.org/repos/asf/spark/blob/20ddf5fd/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
index cc41224..92f424b 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
@@ -222,7 +222,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
       sql("INSERT INTO TABLE t SELECT * FROM tmp")
       checkAnswer(table("t"), (data ++ data).map(Row.fromTuple))
     }
-    sessionState.catalog.unregisterTable(TableIdentifier("tmp"))
+    sessionState.catalog.dropTable(TableIdentifier("tmp"), ignoreIfNotExists = true)
   }
 
   test("overwriting") {
@@ -232,7 +232,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
       sql("INSERT OVERWRITE TABLE t SELECT * FROM tmp")
       checkAnswer(table("t"), data.map(Row.fromTuple))
     }
-    sessionState.catalog.unregisterTable(TableIdentifier("tmp"))
+    sessionState.catalog.dropTable(TableIdentifier("tmp"), ignoreIfNotExists = true)
   }
 
   test("self-join") {

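For context (not part of the commit): SessionCatalog.unregisterTable is replaced here by dropTable, which takes an ignoreIfNotExists flag, so the test cleanup no longer fails if the temporary table is already gone. A minimal Scala sketch of the before/after call, using only names that appear in the diff above:

    import org.apache.spark.sql.catalyst.TableIdentifier

    // old call, removed by this change:
    // sessionState.catalog.unregisterTable(TableIdentifier("tmp"))

    // new call; ignoreIfNotExists = true means the drop does not throw
    // when the temporary table "tmp" no longer exists
    sessionState.catalog.dropTable(TableIdentifier("tmp"), ignoreIfNotExists = true)
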
http://git-wip-us.apache.org/repos/asf/spark/blob/20ddf5fd/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala
index fe44677..bdd3428 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala
@@ -68,8 +68,12 @@ abstract class OrcSuite extends QueryTest with TestHiveSingleton with BeforeAndA
   }
 
   override def afterAll(): Unit = {
-    orcTableDir.delete()
-    orcTableAsDir.delete()
+    try {
+      orcTableDir.delete()
+      orcTableAsDir.delete()
+    } finally {
+      super.afterAll()
+    }
   }
 
   test("create temporary orc table") {

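The afterAll change above follows the usual ScalaTest teardown pattern: run the suite's own cleanup in a try block and call super.afterAll() in finally, so parent traits still clean up shared state even if deleting the directories throws. A minimal sketch of the same pattern, with an illustrative (hypothetical) field name:

    override def afterAll(): Unit = {
      try {
        tempDir.delete()   // suite-specific cleanup; may throw
      } finally {
        super.afterAll()   // always runs, even if the cleanup above fails
      }
    }
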
http://git-wip-us.apache.org/repos/asf/spark/blob/20ddf5fd/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
index bb53179..b6fc61d 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
@@ -20,6 +20,7 @@ package org.apache.spark.sql.hive
 import java.io.File
 
 import org.apache.spark.sql._
+import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.execution.DataSourceScan
 import org.apache.spark.sql.execution.command.ExecutedCommand
 import org.apache.spark.sql.execution.datasources.{InsertIntoDataSource, InsertIntoHadoopFsRelation, LogicalRelation}
@@ -425,10 +426,9 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest {
   }
 
   test("Caching converted data source Parquet Relations") {
-    val _catalog = sessionState.catalog
-    def checkCached(tableIdentifier: _catalog.QualifiedTableName): Unit = {
+    def checkCached(tableIdentifier: TableIdentifier): Unit = {
       // Converted test_parquet should be cached.
-      sessionState.catalog.cachedDataSourceTables.getIfPresent(tableIdentifier) match {
+      sessionState.catalog.getCachedDataSourceTable(tableIdentifier) match {
        case null => fail("Converted test_parquet should be cached in the cache.")
        case logical @ LogicalRelation(parquetRelation: HadoopFsRelation, _, _) => // OK
         case other =>
@@ -453,17 +453,17 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest {
        |  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
       """.stripMargin)
 
-    var tableIdentifier = _catalog.QualifiedTableName("default", "test_insert_parquet")
+    var tableIdentifier = TableIdentifier("test_insert_parquet", Some("default"))
 
     // First, make sure the converted test_parquet is not cached.
-    assert(sessionState.catalog.cachedDataSourceTables.getIfPresent(tableIdentifier) === null)
+    assert(sessionState.catalog.getCachedDataSourceTable(tableIdentifier) === null)
     // Table lookup will make the table cached.
     table("test_insert_parquet")
     checkCached(tableIdentifier)
     // For insert into non-partitioned table, we will do the conversion,
     // so the converted test_insert_parquet should be cached.
     invalidateTable("test_insert_parquet")
-    assert(sessionState.catalog.cachedDataSourceTables.getIfPresent(tableIdentifier) === null)
+    assert(sessionState.catalog.getCachedDataSourceTable(tableIdentifier) === null)
     sql(
       """
         |INSERT INTO TABLE test_insert_parquet
@@ -476,7 +476,7 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest {
       sql("select a, b from jt").collect())
     // Invalidate the cache.
     invalidateTable("test_insert_parquet")
-    assert(sessionState.catalog.cachedDataSourceTables.getIfPresent(tableIdentifier) === null)
+    assert(sessionState.catalog.getCachedDataSourceTable(tableIdentifier) === null)
 
     // Create a partitioned table.
     sql(
@@ -493,8 +493,8 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest {
        |  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
       """.stripMargin)
 
-    tableIdentifier = _catalog.QualifiedTableName("default", "test_parquet_partitioned_cache_test")
-    assert(sessionState.catalog.cachedDataSourceTables.getIfPresent(tableIdentifier) === null)
+    tableIdentifier = TableIdentifier("test_parquet_partitioned_cache_test", Some("default"))
+    assert(sessionState.catalog.getCachedDataSourceTable(tableIdentifier) === null)
     sql(
       """
         |INSERT INTO TABLE test_parquet_partitioned_cache_test
@@ -503,14 +503,14 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest {
       """.stripMargin)
     // Right now, insert into a partitioned Parquet is not supported in data source Parquet.
     // So, we expect it is not cached.
-    assert(sessionState.catalog.cachedDataSourceTables.getIfPresent(tableIdentifier) === null)
+    assert(sessionState.catalog.getCachedDataSourceTable(tableIdentifier) === null)
     sql(
       """
         |INSERT INTO TABLE test_parquet_partitioned_cache_test
         |PARTITION (`date`='2015-04-02')
         |select a, b from jt
       """.stripMargin)
-    assert(sessionState.catalog.cachedDataSourceTables.getIfPresent(tableIdentifier) === null)
+    assert(sessionState.catalog.getCachedDataSourceTable(tableIdentifier) === null)
 
     // Make sure we can cache the partitioned table.
     table("test_parquet_partitioned_cache_test")
@@ -526,7 +526,7 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest {
         """.stripMargin).collect())
 
     invalidateTable("test_parquet_partitioned_cache_test")
-    assert(sessionState.catalog.cachedDataSourceTables.getIfPresent(tableIdentifier) === null)
+    assert(sessionState.catalog.getCachedDataSourceTable(tableIdentifier) === null)
 
     dropTables("test_insert_parquet", "test_parquet_partitioned_cache_test")
   }
@@ -700,6 +700,7 @@ abstract class ParquetPartitioningTest extends QueryTest with SQLTestUtils with
   var partitionedTableDirWithKeyAndComplexTypes: File = null
 
   override def beforeAll(): Unit = {
+    super.beforeAll()
     partitionedTableDir = Utils.createTempDir()
     normalTableDir = Utils.createTempDir()
 
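For context (not part of the commit): the caching test above now uses the shared org.apache.spark.sql.catalyst.TableIdentifier instead of the catalog's path-dependent QualifiedTableName, and reads the cache through the getCachedDataSourceTable accessor rather than touching cachedDataSourceTables directly. A minimal sketch contrasting the two lookups, using only names from the diff:

    import org.apache.spark.sql.catalyst.TableIdentifier

    // old lookup, removed by this change:
    // val id = _catalog.QualifiedTableName("default", "test_insert_parquet")
    // sessionState.catalog.cachedDataSourceTables.getIfPresent(id)

    // new lookup: the table name comes first, the database second in Some(...)
    val id = TableIdentifier("test_insert_parquet", Some("default"))
    assert(sessionState.catalog.getCachedDataSourceTable(id) === null)

The beforeAll change in the last hunk similarly calls super.beforeAll() before creating the temp directories, so parent-trait setup runs first.
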

