Github user gatorsmile commented on a diff in the pull request:

    https://github.com/apache/spark/pull/18421#discussion_r132099901
  
    --- Diff: sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala ---
    @@ -256,6 +257,222 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto
         }
       }
     
    +  test("analyze single partition") {
    +    val tableName = "analyzeTable_part"
    +
    +    def queryStats(ds: String): CatalogStatistics = {
    +      val partition =
    +        spark.sessionState.catalog.getPartition(TableIdentifier(tableName), Map("ds" -> ds))
    +      partition.stats.get
    +    }
    +
    +    def createPartition(ds: String, query: String): Unit = {
    +      sql(s"INSERT INTO TABLE $tableName PARTITION (ds='$ds') $query")
    +    }
    +
    +    withTable(tableName) {
    +      sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED BY (ds STRING)")
    +
    +      createPartition("2010-01-01", "SELECT '1', 'A' from src")
    +      createPartition("2010-01-02", "SELECT '1', 'A' from src UNION ALL SELECT '1', 'A' from src")
    +      createPartition("2010-01-03", "SELECT '1', 'A' from src")
    +
    +      sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-01') COMPUTE STATISTICS NOSCAN")
    +
    +      sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-02') COMPUTE STATISTICS NOSCAN")
    +
    +      assert(queryStats("2010-01-01").rowCount === None)
    +      assert(queryStats("2010-01-01").sizeInBytes === 2000)
    +
    +      assert(queryStats("2010-01-02").rowCount === None)
    +      assert(queryStats("2010-01-02").sizeInBytes === 2*2000)
    +
    +      sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-01') COMPUTE STATISTICS")
    +
    +      sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-02') COMPUTE STATISTICS")
    +
    +      assert(queryStats("2010-01-01").rowCount.get === 500)
    +      assert(queryStats("2010-01-01").sizeInBytes === 2000)
    +
    +      assert(queryStats("2010-01-02").rowCount.get === 2*500)
    +      assert(queryStats("2010-01-02").sizeInBytes === 2*2000)
    +    }
    +  }
    +
    +  test("analyze a set of partitions") {
    +    val tableName = "analyzeTable_part"
    +
    +    def queryStats(ds: String, hr: String): Option[CatalogStatistics] = {
    +      val tableId = TableIdentifier(tableName)
    +      val partition =
    +        spark.sessionState.catalog.getPartition(tableId, Map("ds" -> ds, "hr" -> hr))
    +      partition.stats
    +    }
    +
    +    def assertPartitionStats(
    +        ds: String,
    +        hr: String,
    +        rowCount: Option[BigInt],
    +        sizeInBytes: BigInt): Unit = {
    +      val stats = queryStats(ds, hr).get
    +      assert(stats.rowCount === rowCount)
    +      assert(stats.sizeInBytes === sizeInBytes)
    +    }
    +
    +    def createPartition(ds: String, hr: Int, query: String): Unit = {
    +      sql(s"INSERT INTO TABLE $tableName PARTITION (ds='$ds', hr=$hr) $query")
    +    }
    +
    +    withTable(tableName) {
    +      sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED BY (ds STRING, hr INT)")
    +
    +      createPartition("2010-01-01", 10, "SELECT '1', 'A' from src")
    +      createPartition("2010-01-01", 11, "SELECT '1', 'A' from src")
    +      createPartition("2010-01-02", 10, "SELECT '1', 'A' from src")
    +      createPartition("2010-01-02", 11,
    +        "SELECT '1', 'A' from src UNION ALL SELECT '1', 'A' from src")
    +
    +      sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-01') COMPUTE STATISTICS NOSCAN")
    +
    +      assertPartitionStats("2010-01-01", "10", rowCount = None, sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-01", "11", rowCount = None, sizeInBytes = 2000)
    +      assert(queryStats("2010-01-02", "10") === None)
    +      assert(queryStats("2010-01-02", "11") === None)
    +
    +      sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-02') COMPUTE STATISTICS NOSCAN")
    +
    +      assertPartitionStats("2010-01-01", "10", rowCount = None, sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-01", "11", rowCount = None, sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-02", "10", rowCount = None, sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-02", "11", rowCount = None, sizeInBytes = 2*2000)
    +
    +      sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-01') COMPUTE STATISTICS")
    +
    +      assertPartitionStats("2010-01-01", "10", rowCount = Some(500), sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-01", "11", rowCount = Some(500), sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-02", "10", rowCount = None, sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-02", "11", rowCount = None, sizeInBytes = 2*2000)
    +
    +      sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-02') COMPUTE STATISTICS")
    +
    +      assertPartitionStats("2010-01-01", "10", rowCount = Some(500), sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-01", "11", rowCount = Some(500), sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-02", "10", rowCount = Some(500), sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-02", "11", rowCount = Some(2*500), sizeInBytes = 2*2000)
    +    }
    +  }
    +
    +  test("analyze all partitions") {
    +    val tableName = "analyzeTable_part"
    +
    +    def assertPartitionStats(
    +        ds: String,
    +        hr: String,
    +        rowCount: Option[BigInt],
    +        sizeInBytes: BigInt): Unit = {
    +      val stats = spark.sessionState.catalog.getPartition(TableIdentifier(tableName),
    +        Map("ds" -> ds, "hr" -> hr)).stats.get
    +      assert(stats.rowCount === rowCount)
    +      assert(stats.sizeInBytes === sizeInBytes)
    +    }
    +
    +    def createPartition(ds: String, hr: Int, query: String): Unit = {
    +      sql(s"INSERT INTO TABLE $tableName PARTITION (ds='$ds', hr=$hr) $query")
    +    }
    +
    +    withTable(tableName) {
    +      sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED BY (ds STRING, hr INT)")
    +
    +      createPartition("2010-01-01", 10, "SELECT '1', 'A' from src")
    +      createPartition("2010-01-01", 11, "SELECT '1', 'A' from src")
    +      createPartition("2010-01-02", 10, "SELECT '1', 'A' from src")
    +      createPartition("2010-01-02", 11,
    +        "SELECT '1', 'A' from src UNION ALL SELECT '1', 'A' from src")
    +
    +      sql(s"ANALYZE TABLE $tableName PARTITION (ds, hr) COMPUTE STATISTICS NOSCAN")
    +
    +      assertPartitionStats("2010-01-01", "10", rowCount = None, sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-01", "11", rowCount = None, sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-02", "10", rowCount = None, sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-02", "11", rowCount = None, sizeInBytes = 2*2000)
    +
    +      sql(s"ANALYZE TABLE $tableName PARTITION (ds, hr) COMPUTE STATISTICS")
    +
    +      assertPartitionStats("2010-01-01", "10", rowCount = Some(500), sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-01", "11", rowCount = Some(500), sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-02", "10", rowCount = Some(500), sizeInBytes = 2000)
    +      assertPartitionStats("2010-01-02", "11", rowCount = Some(2*500), sizeInBytes = 2*2000)
    +    }
    +  }
    +
    +  test("analyze partitions for an empty table") {
    +    val tableName = "analyzeTable_part"
    +
    +    withTable(tableName) {
    +      sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED BY (ds STRING)")
    +
    +      // make sure there is no exception
    +      sql(s"ANALYZE TABLE $tableName PARTITION (ds) COMPUTE STATISTICS NOSCAN")
    +
    +      // make sure there is no exception
    +      sql(s"ANALYZE TABLE $tableName PARTITION (ds) COMPUTE STATISTICS")
    +    }
    +  }
    +
    +  test("analyze partitions case sensitivity") {
    +    val tableName = "analyzeTable_part"
    +    withTable(tableName) {
    +      sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED BY (ds STRING)")
    +
    +      sql(s"INSERT INTO TABLE $tableName PARTITION (ds='2010-01-01') SELECT * FROM src")
    +
    +      withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
    +        sql(s"ANALYZE TABLE $tableName PARTITION (DS='2010-01-01') COMPUTE STATISTICS")
    +      }
    +
    +      withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
    +        val message = intercept[AnalysisException] {
    +          sql(s"ANALYZE TABLE $tableName PARTITION (DS='2010-01-01') COMPUTE STATISTICS")
    +        }.getMessage
    +        assert(message.contains(s"Partition specification for table '${tableName.toLowerCase}' " +
    +            "in database 'default' refers to unknown partition column(s): DS"))
    --- End diff --
    
    Nit: Indent.
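    
    A minimal sketch of the suggested fix, assuming the nit refers to the four-space continuation indent on the wrapped string (Spark style indents continuation lines by two spaces):
    
    ```scala
    assert(message.contains(s"Partition specification for table '${tableName.toLowerCase}' " +
      "in database 'default' refers to unknown partition column(s): DS"))
    ```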

