Github user gatorsmile commented on a diff in the pull request:

    https://github.com/apache/spark/pull/14207#discussion_r71633897
  
    --- Diff: sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala ---
    @@ -252,6 +252,222 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
         }
       }
     
    +  private def createDataSourceTable(
    +      path: File,
    +      userSpecifiedSchema: Option[String],
    +      userSpecifiedPartitionCols: Option[String]): (StructType, Seq[String]) = {
    +    var tableSchema = StructType(Nil)
    +    var partCols = Seq.empty[String]
    +
    +    val tabName = "tab1"
    +    withTable(tabName) {
    +      val partitionClause =
    +        userSpecifiedPartitionCols.map(p => s"PARTITIONED BY ($p)").getOrElse("")
    +      val schemaClause = userSpecifiedSchema.map(s => s"($s)").getOrElse("")
    +      sql(
    +        s"""
    +           |CREATE TABLE $tabName $schemaClause
    +           |USING parquet
    +           |OPTIONS (
    +           |  path '$path'
    +           |)
    +           |$partitionClause
    +         """.stripMargin)
    +      val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName))
    +
    +      tableSchema = DDLUtils.getSchemaFromTableProperties(tableMetadata)
    +      partCols = DDLUtils.getPartitionColumnsFromTableProperties(tableMetadata)
    +    }
    +    (tableSchema, partCols)
    +  }
    +
    +  test("Create partitioned data source table without user specified schema") {
    +    import testImplicits._
    +    val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
    +
    +    // Case 1: with partitioning columns but no schema: Option("inexistentColumns")
    +    // Case 2: without schema and partitioning columns: None
    +    Seq(Option("inexistentColumns"), None).foreach { partitionCols =>
    +      withTempPath { pathToPartitionedTable =>
    +        df.write.format("parquet").partitionBy("num")
    +          .save(pathToPartitionedTable.getCanonicalPath)
    +        val (tableSchema, partCols) =
    +          createDataSourceTable(
    +            pathToPartitionedTable,
    +            userSpecifiedSchema = None,
    +            userSpecifiedPartitionCols = partitionCols)
    +        assert(tableSchema ==
    +          StructType(StructField("str", StringType, nullable = true) ::
    +            StructField("num", IntegerType, nullable = true) :: Nil))
    +        assert(partCols == Seq("num"))
    +      }
    +    }
    +  }
    +
    +  test("Create partitioned data source table with user specified schema") {
    +    import testImplicits._
    +    val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
    +
    +    // Case 1: with user-specified partitioning columns: Option("num")
    +    // Case 2: without user-specified partitioning columns: None
    +    Seq(Option("num"), None).foreach { partitionCols =>
    +      withTempPath { pathToPartitionedTable =>
    +        df.write.format("parquet").partitionBy("num")
    +          .save(pathToPartitionedTable.getCanonicalPath)
    +        val (tableSchema, partCols) =
    +          createDataSourceTable(
    +            pathToPartitionedTable,
    +            userSpecifiedSchema = Option("num int, str string"),
    +            userSpecifiedPartitionCols = partitionCols)
    +        assert(tableSchema ==
    +          StructType(StructField("num", IntegerType, nullable = true) ::
    +            StructField("str", StringType, nullable = true) :: Nil))
    +        assert(partCols.mkString(", ") == partitionCols.getOrElse(""))
    +      }
    +    }
    +  }
    +
    +  test("Create non-partitioned data source table without user specified schema") {
    +    import testImplicits._
    +    val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
    +
    +    // Case 1: with partitioning columns but no schema: Option("inexistentColumns")
    +    // Case 2: without schema and partitioning columns: None
    +    Seq(Option("inexistentColumns"), None).foreach { partitionCols =>
    +      withTempPath { pathToNonPartitionedTable =>
    +        df.write.format("parquet").save(pathToNonPartitionedTable.getCanonicalPath)
    +        val (tableSchema, partCols) =
    +          createDataSourceTable(
    +            pathToNonPartitionedTable,
    +            userSpecifiedSchema = None,
    +            userSpecifiedPartitionCols = partitionCols)
    +        assert(tableSchema ==
    +          StructType(StructField("num", IntegerType, nullable = true) ::
    +            StructField("str", StringType, nullable = true) :: Nil))
    +        assert(partCols.isEmpty)
    +      }
    +    }
    +  }
    +
    +  test("Create non-partitioned data source table with user specified schema") {
    +    import testImplicits._
    +    val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
    +
    +    // Case 1: with user-specified partitioning columns: Option("num")
    +    // Case 2: without user-specified partitioning columns: None
    +    Seq(Option("num"), None).foreach { partitionCols =>
    +      withTempPath { pathToNonPartitionedTable =>
    +        df.write.format("parquet").save(pathToNonPartitionedTable.getCanonicalPath)
    +        val (tableSchema, partCols) =
    +          createDataSourceTable(
    +            pathToNonPartitionedTable,
    +            userSpecifiedSchema = Option("num int, str string"),
    +            userSpecifiedPartitionCols = partitionCols)
    +        assert(tableSchema ==
    +          StructType(StructField("num", IntegerType, nullable = true) ::
    +            StructField("str", StringType, nullable = true) :: Nil))
    +        assert(partCols.mkString(", ") == partitionCols.getOrElse(""))
    +      }
    +    }
    +  }
    +
    +  test("Describe Table with Corrupted Schema") {
    +    import testImplicits._
    +
    +    val tabName = "tab1"
    +    withTempPath { dir =>
    +      val path = dir.getCanonicalPath
    +      val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("col1", "col2")
    +      df.write.format("json").save(path)
    +
    +      withTable(tabName) {
    +        sql(
    +          s"""
    +             |CREATE TABLE $tabName
    +             |USING json
    +             |OPTIONS (
    +             |  path '$path'
    +             |)
    +           """.stripMargin)
    +
    +        val catalog = spark.sessionState.catalog
    +        val table = catalog.getTableMetadata(TableIdentifier(tabName))
    +        val newProperties = table.properties.filterKeys(key =>
    +          key != CreateDataSourceTableUtils.DATASOURCE_SCHEMA_NUMPARTS)
    +        val newTable = table.copy(properties = newProperties)
    +        catalog.alterTable(newTable)
    +
    +        val e = intercept[AnalysisException] {
    +          sql(s"DESC $tabName")
    +        }.getMessage
    +        assert(e.contains(s"Could not read schema from the metastore because it is corrupted"))
    +      }
    +    }
    +  }
    +
    +  test("Refresh table after changing the data source table partitioning") {
    +    import testImplicits._
    +
    +    val tabName = "tab1"
    +    val catalog = spark.sessionState.catalog
    +    withTempPath { dir =>
    +      val path = dir.getCanonicalPath
    +      val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString, i, i))
    +        .toDF("col1", "col2", "col3", "col4")
    +      df.write.format("json").partitionBy("col1", "col3").save(path)
    +      val schema = StructType(
    +        StructField("col2", StringType, nullable = true) ::
    +        StructField("col4", LongType, nullable = true) ::
    +        StructField("col1", IntegerType, nullable = true) ::
    +        StructField("col3", IntegerType, nullable = true) :: Nil)
    +      val partitionCols = Seq("col1", "col3")
    +
    +      // Ensure the schema is split to multiple properties.
    +      withSQLConf(SQLConf.SCHEMA_STRING_LENGTH_THRESHOLD.key -> "1") {
    --- End diff --
    
Previously, we used this to verify that the refresh still worked when the
table schema is split across multiple properties. Now that we no longer need
to refresh the schema, we can remove it. Thanks!
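
In case it is useful context for other readers, here is a rough sketch of what
that conf exercised (an illustration only, built from the suite helpers and
property names already used above, with an arbitrary table name; it is not the
test in this PR). With SCHEMA_STRING_LENGTH_THRESHOLD set to 1, the serialized
schema JSON cannot fit in a single table property, so it is stored as multiple
numbered parts that getSchemaFromTableProperties has to reassemble:

    // Illustrative sketch only: force the schema string to be split across
    // multiple table properties and check it can still be read back whole.
    withTable("tab1") {
      withSQLConf(SQLConf.SCHEMA_STRING_LENGTH_THRESHOLD.key -> "1") {
        sql("CREATE TABLE tab1 (col1 INT, col2 STRING) USING parquet")
        val metadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier("tab1"))
        // With a 1-character threshold the schema must occupy more than one part.
        assert(metadata.properties(
          CreateDataSourceTableUtils.DATASOURCE_SCHEMA_NUMPARTS).toInt > 1)
        // Reassembling the parts still yields the full user-specified schema.
        assert(DDLUtils.getSchemaFromTableProperties(metadata).fieldNames.toSeq ==
          Seq("col1", "col2"))
      }
    }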

