Github user gatorsmile commented on a diff in the pull request:
https://github.com/apache/spark/pull/18421#discussion_r128296684
--- Diff:
sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala ---
@@ -182,6 +183,189 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleton
     }
   }
+ test("analyze single partition") {
+ val tableName = "analyzeTable_part"
+
+ def queryStats(ds: String): CatalogStatistics = {
+ val partition =
+
spark.sessionState.catalog.getPartition(TableIdentifier(tableName), Map("ds" ->
ds))
+ partition.stats.get
+ }
+
+ def createPartition(ds: String, query: String): Unit = {
+ sql(s"INSERT INTO TABLE $tableName PARTITION (ds='$ds') $query")
+ }
+
+ withTable(tableName) {
+ sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED
BY (ds STRING)")
+
+ createPartition("2010-01-01", "SELECT '1', 'A' from src")
+ createPartition("2010-01-02", "SELECT '1', 'A' from src UNION ALL
SELECT '1', 'A' from src")
+ createPartition("2010-01-03", "SELECT '1', 'A' from src")
+
+ sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-01') COMPUTE
STATISTICS NOSCAN")
+
+ sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-02') COMPUTE
STATISTICS NOSCAN")
+
+ assert(queryStats("2010-01-01").rowCount === None)
+ assert(queryStats("2010-01-01").sizeInBytes === 2000)
+
+ assert(queryStats("2010-01-02").rowCount === None)
+ assert(queryStats("2010-01-02").sizeInBytes === 2*2000)
+
+ sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-01') COMPUTE
STATISTICS")
+
+ sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-02') COMPUTE
STATISTICS")
+
+ assert(queryStats("2010-01-01").rowCount.get === 500)
+ assert(queryStats("2010-01-01").sizeInBytes === 2000)
+
+ assert(queryStats("2010-01-02").rowCount.get === 2*500)
+ assert(queryStats("2010-01-02").sizeInBytes === 2*2000)
+ }
+ }
+
+ test("analyze a set of partitions") {
+ val tableName = "analyzeTable_part"
+
+ def queryStats(ds: String, hr: String): Option[CatalogStatistics] = {
+ val tableId = TableIdentifier(tableName)
+ val partition =
+ spark.sessionState.catalog.getPartition(tableId, Map("ds" -> ds,
"hr" -> hr))
+ partition.stats
+ }
+
+ def assertPartitionStats(
+ ds: String,
+ hr: String,
+ rowCount: Option[BigInt],
+ sizeInBytes: BigInt): Unit = {
+ val stats = queryStats(ds, hr).get
+ assert(stats.rowCount === rowCount)
+ assert(stats.sizeInBytes === sizeInBytes)
+ }
+
+ def createPartition(ds: String, hr: Int, query: String): Unit = {
+ sql(s"INSERT INTO TABLE $tableName PARTITION (ds='$ds', hr=$hr)
$query")
+ }
+
+ withTable(tableName) {
+ sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED
BY (ds STRING, hr INT)")
+
+ createPartition("2010-01-01", 10, "SELECT '1', 'A' from src")
+ createPartition("2010-01-01", 11, "SELECT '1', 'A' from src")
+ createPartition("2010-01-02", 10, "SELECT '1', 'A' from src")
+ createPartition("2010-01-02", 11,
+ "SELECT '1', 'A' from src UNION ALL SELECT '1', 'A' from src")
+
+ sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-01') COMPUTE
STATISTICS NOSCAN")
+
+ assertPartitionStats("2010-01-01", "10", rowCount = None,
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-01", "11", rowCount = None,
sizeInBytes = 2000)
+ assert(queryStats("2010-01-02", "10") === None)
+ assert(queryStats("2010-01-02", "11") === None)
+
+ sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-02') COMPUTE
STATISTICS NOSCAN")
+
+ assertPartitionStats("2010-01-01", "10", rowCount = None,
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-01", "11", rowCount = None,
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-02", "10", rowCount = None,
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-02", "11", rowCount = None,
sizeInBytes = 2*2000)
+
+ sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-01') COMPUTE
STATISTICS")
+
+ assertPartitionStats("2010-01-01", "10", rowCount = Some(500),
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-01", "11", rowCount = Some(500),
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-02", "10", rowCount = None,
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-02", "11", rowCount = None,
sizeInBytes = 2*2000)
+
+ sql(s"ANALYZE TABLE $tableName PARTITION (ds='2010-01-02') COMPUTE
STATISTICS")
+
+ assertPartitionStats("2010-01-01", "10", rowCount = Some(500),
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-01", "11", rowCount = Some(500),
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-02", "10", rowCount = Some(500),
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-02", "11", rowCount = Some(2*500),
sizeInBytes = 2*2000)
+ }
+ }
+
+ test("analyze all partitions") {
+ val tableName = "analyzeTable_part"
+
+ def assertPartitionStats(
+ ds: String,
+ hr: String,
+ rowCount: Option[BigInt],
+ sizeInBytes: BigInt): Unit = {
+ val stats =
spark.sessionState.catalog.getPartition(TableIdentifier(tableName),
+ Map("ds" -> ds, "hr" -> hr)).stats.get
+ assert(stats.rowCount === rowCount)
+ assert(stats.sizeInBytes === sizeInBytes)
+ }
+
+ def createPartition(ds: String, hr: Int, query: String): Unit = {
+ sql(s"INSERT INTO TABLE $tableName PARTITION (ds='$ds', hr=$hr)
$query")
+ }
+
+ withTable(tableName) {
+ sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED
BY (ds STRING, hr INT)")
+
+ createPartition("2010-01-01", 10, "SELECT '1', 'A' from src")
+ createPartition("2010-01-01", 11, "SELECT '1', 'A' from src")
+ createPartition("2010-01-02", 10, "SELECT '1', 'A' from src")
+ createPartition("2010-01-02", 11,
+ "SELECT '1', 'A' from src UNION ALL SELECT '1', 'A' from src")
+
+ sql(s"ANALYZE TABLE $tableName PARTITION (ds, hr) COMPUTE STATISTICS
NOSCAN")
+
+ assertPartitionStats("2010-01-01", "10", rowCount = None,
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-01", "11", rowCount = None,
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-01", "11", rowCount = None,
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-02", "11", rowCount = None,
sizeInBytes = 2*2000)
+
+ sql(s"ANALYZE TABLE $tableName PARTITION (ds, hr) COMPUTE
STATISTICS")
+
+ assertPartitionStats("2010-01-01", "10", rowCount = Some(500),
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-01", "11", rowCount = Some(500),
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-01", "11", rowCount = Some(500),
sizeInBytes = 2000)
+ assertPartitionStats("2010-01-02", "11", rowCount = Some(2*500),
sizeInBytes = 2*2000)
+ }
+ }
+
+ test("analyze partitions for an empty table") {
+ val tableName = "analyzeTable_part"
+
+ withTable(tableName) {
+ sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED
BY (ds STRING)")
+
+ // make sure there is no exception
+ sql(s"ANALYZE TABLE $tableName PARTITION (ds) COMPUTE STATISTICS
NOSCAN")
+
+ // make sure there is no exception
+ sql(s"ANALYZE TABLE $tableName PARTITION (ds) COMPUTE STATISTICS")
+ }
+ }
+
+ test("analyze non-existent partition") {
+ val tableName = "analyzeTable_part"
+ withTable(tableName) {
+ sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED
BY (ds STRING)")
+
+ sql(s"INSERT INTO TABLE $tableName PARTITION (ds='2010-01-01')
SELECT * FROM src")
+
+ intercept[AnalysisException] {
+ sql(s"ANALYZE TABLE $tableName PARTITION (hour=20) COMPUTE
STATISTICS")
+ }
+
+ intercept[AnalysisException] {
--- End diff ---
Could you capture all these error messages? We always need to check whether
the error messages make sense.
```Scala
val message = intercept[AnalysisException] {
  ...
}.getMessage
assert(message.contains("XYZ"))
```
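For example, here is a minimal sketch of that pattern applied to the `hour=20` case above. The asserted substring is an assumption about the actual error text; it should be adjusted to whatever the analyzer really reports:
```Scala
// Capture the exception and check its message, rather than only
// asserting that an AnalysisException is thrown. The expected
// substring ("hour") is an assumption about the error wording.
val message = intercept[AnalysisException] {
  sql(s"ANALYZE TABLE $tableName PARTITION (hour=20) COMPUTE STATISTICS")
}.getMessage
assert(message.contains("hour"))
```
Asserting on a stable fragment of the message, such as the offending partition column name, verifies the error is meaningful without making the test brittle to incidental rewording.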