GitHub user wzhfy commented on a diff in the pull request:

    https://github.com/apache/spark/pull/14971#discussion_r117172781
  
    --- Diff: sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala ---
    @@ -215,6 +218,217 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleton
         }
       }
     
    +  private def createNonPartitionedTable(
    +      tabName: String,
    +      analyzedBySpark: Boolean = true,
    +      analyzedByHive: Boolean = true): Unit = {
    +    val hiveClient = spark.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client
    +    sql(
    +      s"""
    +         |CREATE TABLE $tabName (key STRING, value STRING)
    +         |STORED AS TEXTFILE
    +         |TBLPROPERTIES ('prop1' = 'val1', 'prop2' = 'val2')
    +       """.stripMargin)
    +    sql(s"INSERT INTO TABLE $tabName SELECT * FROM src")
    +    if (analyzedBySpark) sql(s"ANALYZE TABLE $tabName COMPUTE STATISTICS")
    +    // This is to mimic the scenario in which Hive generates statistics before we read them
    +    if (analyzedByHive) hiveClient.runSqlHive(s"ANALYZE TABLE $tabName COMPUTE STATISTICS")
    +    val describeResult1 = hiveClient.runSqlHive(s"DESCRIBE FORMATTED $tabName")
    +
    +    val tableMetadata =
    +      spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName)).properties
    +    // statistics info is not contained in the metadata of the original table
    +    assert(Seq(StatsSetupConst.COLUMN_STATS_ACCURATE,
    +      StatsSetupConst.NUM_FILES,
    +      StatsSetupConst.NUM_PARTITIONS,
    +      StatsSetupConst.ROW_COUNT,
    +      StatsSetupConst.RAW_DATA_SIZE,
    +      StatsSetupConst.TOTAL_SIZE).forall(!tableMetadata.contains(_)))
    +
    +    if (analyzedByHive) {
    +      assert(StringUtils.filterPattern(describeResult1, "*numRows\\s+500*").nonEmpty)
    +    } else {
    +      assert(StringUtils.filterPattern(describeResult1, "*numRows\\s+500*").isEmpty)
    +    }
    +  }
    +
    +  private def extractStatsPropValues(
    +      descOutput: Seq[String],
    +      propKey: String): Option[BigInt] = {
    +    val str = descOutput
    +      .filterNot(_.contains(HiveExternalCatalog.STATISTICS_PREFIX))
    +      .filter(_.contains(propKey))
    +    if (str.isEmpty) {
    +      None
    +    } else {
    +      assert(str.length == 1, "found more than one match")
    +      val pattern = new Regex(s"""$propKey\\s+(-?\\d+)""")
    +      val pattern(value) = str.head.trim
    +      Option(BigInt(value))
    +    }
    +  }
    +
    +  test("get statistics when the table is analyzed in neither Hive nor Spark") {
    +    val tabName = "tab1"
    +    withTable(tabName) {
    +      createNonPartitionedTable(tabName, analyzedByHive = false, analyzedBySpark = false)
    +      checkTableStats(
    +        tabName, hasSizeInBytes = true, expectedRowCounts = None)
    +
    +      // Without any table analysis, Hive does not generate numRows or rawDataSize,
    +      // but totalSize is still populated when data is inserted
    +      val hiveClient = spark.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client
    +      val describeResult = hiveClient.runSqlHive(s"DESCRIBE FORMATTED $tabName")
    +
    +      val rawDataSize = extractStatsPropValues(describeResult, "rawDataSize")
    +      val numRows = extractStatsPropValues(describeResult, "numRows")
    +      val totalSize = extractStatsPropValues(describeResult, "totalSize")
    +      assert(rawDataSize.isEmpty, "rawDataSize should not be shown without table analysis")
    +      assert(numRows.isEmpty, "numRows should not be shown without table analysis")
    +      assert(totalSize.isDefined && totalSize.get > 0, "totalSize is lost")
    +    }
    +  }
    +
    +  test("alter table rename after analyze table") {
    +    Seq(true, false).foreach { analyzedBySpark =>
    +      val oldName = "tab1"
    +      val newName = "tab2"
    +      withTable(oldName, newName) {
    +        createNonPartitionedTable(oldName, analyzedByHive = true, analyzedBySpark = analyzedBySpark)
    +        val fetchedStats1 = checkTableStats(
    +          oldName, hasSizeInBytes = true, expectedRowCounts = Some(500))
    +        sql(s"ALTER TABLE $oldName RENAME TO $newName")
    +        val fetchedStats2 = checkTableStats(
    +          newName, hasSizeInBytes = true, expectedRowCounts = Some(500))
    +        assert(fetchedStats1 == fetchedStats2)
    +
    +        // ALTER TABLE RENAME does not affect the contents of Hive specific statistics
    +        val hiveClient = spark.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client
    +        val describeResult = hiveClient.runSqlHive(s"DESCRIBE FORMATTED $newName")
    +
    +        val rawDataSize = extractStatsPropValues(describeResult, "rawDataSize")
    +        val numRows = extractStatsPropValues(describeResult, "numRows")
    +        val totalSize = extractStatsPropValues(describeResult, "totalSize")
    +        assert(rawDataSize.isDefined && rawDataSize.get > 0, "rawDataSize is lost")
    +        assert(numRows.isDefined && numRows.get == 500, "numRows is lost")
    +        assert(totalSize.isDefined && totalSize.get > 0, "totalSize is lost")
    +      }
    +    }
    +  }
    +
    +  test("alter table SET TBLPROPERTIES after analyze table") {
    +    Seq(true, false).foreach { analyzedBySpark =>
    +      val tabName = "tab1"
    +      withTable(tabName) {
    +        createNonPartitionedTable(tabName, analyzedByHive = true, analyzedBySpark = analyzedBySpark)
    +        val fetchedStats1 = checkTableStats(
    +          tabName, hasSizeInBytes = true, expectedRowCounts = Some(500))
    +        sql(s"ALTER TABLE $tabName SET TBLPROPERTIES ('foo' = 'a')")
    +        val fetchedStats2 = checkTableStats(
    +          tabName, hasSizeInBytes = true, expectedRowCounts = Some(500))
    +        assert(fetchedStats1 == fetchedStats2)
    +
    +        val hiveClient = spark.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client
    +        val describeResult = hiveClient.runSqlHive(s"DESCRIBE FORMATTED $tabName")
    +
    +        val totalSize = extractStatsPropValues(describeResult, "totalSize")
    +        assert(totalSize.isDefined && totalSize.get > 0, "totalSize is lost")
    +
    +        // ALTER TABLE SET TBLPROPERTIES invalidates some Hive specific statistics
    +        // This is triggered by the Hive alterTable API
    +        val numRows = extractStatsPropValues(describeResult, "numRows")
    +        assert(numRows.isDefined && numRows.get == -1, "numRows should be invalidated to -1")
    +        val rawDataSize = extractStatsPropValues(describeResult, "rawDataSize")
    +        assert(rawDataSize.isDefined && rawDataSize.get == -1, "rawDataSize should be invalidated to -1")
    +      }
    +    }
    +  }
    +
    +  test("alter table UNSET TBLPROPERTIES after analyze table") {
    +    Seq(true, false).foreach { analyzedBySpark =>
    +      val tabName = "tab1"
    +      withTable(tabName) {
    +        createNonPartitionedTable(tabName, analyzedByHive = true, analyzedBySpark = analyzedBySpark)
    +        val fetchedStats1 = checkTableStats(
    +          tabName, hasSizeInBytes = true, expectedRowCounts = Some(500))
    +        sql(s"ALTER TABLE $tabName UNSET TBLPROPERTIES ('prop1')")
    +        val fetchedStats2 = checkTableStats(
    +          tabName, hasSizeInBytes = true, expectedRowCounts = Some(500))
    +        assert(fetchedStats1 == fetchedStats2)
    +
    +        val hiveClient = spark.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client
    +        val describeResult = hiveClient.runSqlHive(s"DESCRIBE FORMATTED $tabName")
    +
    +        val totalSize = extractStatsPropValues(describeResult, "totalSize")
    +        assert(totalSize.isDefined && totalSize.get > 0, "totalSize is lost")
    +
    +        // ALTER TABLE UNSET TBLPROPERTIES invalidates some Hive specific statistics
    +        // This is triggered by the Hive alterTable API
    +        val numRows = extractStatsPropValues(describeResult, "numRows")
    +        assert(numRows.isDefined && numRows.get == -1, "numRows should be invalidated to -1")
    +        val rawDataSize = extractStatsPropValues(describeResult, "rawDataSize")
    +        assert(rawDataSize.isDefined && rawDataSize.get == -1, "rawDataSize should be invalidated to -1")
    +      }
    +    }
    +  }
    +
    +  test("add/drop partitions - managed table") {
    +    val catalog = spark.sessionState.catalog
    +    val managedTable = "partitionedTable"
    +    withTable(managedTable) {
    +      sql(
    +        s"""
    +           |CREATE TABLE $managedTable (key INT, value STRING)
    +           |PARTITIONED BY (ds STRING, hr STRING)
    +         """.stripMargin)
    +
    +      for (ds <- Seq("2008-04-08", "2008-04-09"); hr <- Seq("11", "12")) {
    +        sql(
    +          s"""
    +             |INSERT OVERWRITE TABLE $managedTable
    +             |partition (ds='$ds',hr='$hr')
    +             |SELECT 1, 'a'
    +           """.stripMargin)
    +      }
    +
    +      checkTableStats(
    +        managedTable, hasSizeInBytes = false, expectedRowCounts = None)
    +
    +      sql(s"ANALYZE TABLE $managedTable COMPUTE STATISTICS")
    +
    +      val stats1 = checkTableStats(
    +        managedTable, hasSizeInBytes = true, expectedRowCounts = Some(4))
    +
    +      sql(
    +        s"""
    +           |ALTER TABLE $managedTable DROP PARTITION (ds='2008-04-08'),
    +           |PARTITION (hr='12')
    +        """.stripMargin)
    +      assert(catalog.listPartitions(TableIdentifier(managedTable)).map(_.spec).toSet ==
    +        Set(Map("ds" -> "2008-04-09", "hr" -> "11")))
    +
    +      val stats2 = checkTableStats(
    +        managedTable, hasSizeInBytes = true, expectedRowCounts = Some(4))
    +      assert(stats1 == stats2)
    +
    +      sql(s"ANALYZE TABLE $managedTable COMPUTE STATISTICS")
    +
    +      val stats3 = checkTableStats(
    +        managedTable, hasSizeInBytes = true, expectedRowCounts = Some(1))
    +      assert(stats2.get.sizeInBytes > stats3.get.sizeInBytes)
    +
    +      sql(s"ALTER TABLE $managedTable ADD PARTITION (ds='2008-04-08', hr='12')")
    +      assert(stats1 == stats2)
    --- End diff --
    
redundant? `assert(stats1 == stats2)` was already checked a few lines above; should this line verify the stats after `ADD PARTITION` instead?
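
If so, a minimal sketch of what that check could look like (this reuses the suite's existing `checkTableStats` helper; `stats4` is a hypothetical name, not code from the PR):

    // Hypothetical follow-up check: ADD PARTITION introduces no data, so the
    // stats from the last ANALYZE should remain in effect until re-analysis.
    val stats4 = checkTableStats(
      managedTable, hasSizeInBytes = true, expectedRowCounts = Some(1))
    assert(stats4 == stats3)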

