MaxGekk commented on a change in pull request #31131:
URL: https://github.com/apache/spark/pull/31131#discussion_r555280134
##########
File path: sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala
##########
@@ -43,6 +44,28 @@ trait AlterTableDropPartitionSuiteBase extends command.AlterTableDropPartitionSuiteBase
       checkPartitions(t) // no partitions
     }
   }
+
+  test("SPARK-34060, SPARK-34071: update stats of cached table") {
+    withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> "true") {
+      withNamespaceAndTable("ns", "tbl") { t =>
+        sql(s"CREATE TABLE $t (id int, part int) $defaultUsing PARTITIONED BY (part)")
+        sql(s"INSERT INTO $t PARTITION (part=0) SELECT 0")
+        sql(s"INSERT INTO $t PARTITION (part=1) SELECT 1")
+        assert(!spark.catalog.isCached(t))
+        sql(s"CACHE TABLE $t")
+        assert(spark.catalog.isCached(t))
+        checkAnswer(sql(s"SELECT * FROM $t"), Seq(Row(0, 0), Row(1, 1)))
+        val twoPartSize = getTableSize(t)
+        assert(twoPartSize > 0)
+
+        sql(s"ALTER TABLE $t DROP PARTITION (part=0)")
+        assert(spark.catalog.isCached(t))
+        val onePartSize = getTableSize(t)
+        assert(0 < onePartSize && onePartSize < twoPartSize)
Review comment:
> does getTableSize look at cached table for stats

No, it doesn't, but before the PR https://github.com/apache/spark/pull/31112 updating the stats uncached the table.
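
For context, here is a minimal sketch of what a `getTableSize`-style helper can look like: it reads the size from the catalog statistics reported by `DESCRIBE TABLE EXTENDED`, not from the cached relation. The exact parsing below is an assumption, and it presumes `sql` (a SparkSession) is in scope inside the test suite:

```scala
// Hypothetical sketch of a getTableSize-style helper (assumes `sql` from the
// test suite / SparkSession is in scope). It parses the "Statistics" row of
// DESCRIBE TABLE EXTENDED, i.e. the stats stored in the catalog, not the
// size of the cached InMemoryRelation.
def getTableSize(tableName: String): Long = {
  val stats = sql(s"DESCRIBE TABLE EXTENDED $tableName")
    .where("col_name = 'Statistics'")
    .select("data_type")
    .collect()
  assert(stats.nonEmpty, s"No stats found for $tableName")
  // The value looks like "1234 bytes" or "1234 bytes, 2 rows".
  stats.head.getString(0).split("\\s+").head.toLong
}
```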
Actually, I moved the test from the Hive-specific test suite to the base test suite for v1 catalogs. Here, I made the test portable: the table data has different sizes across catalogs, so instead of comparing exact numbers I replaced that with this check, which is independent of the table size.
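
To illustrate the difference (the byte count below is made up, purely to show the non-portable style that was dropped):

```scala
// Non-portable: an exact byte count depends on the catalog and file format
// (1820 is a made-up number used only for illustration).
// assert(getTableSize(t) == 1820)

// Portable, as in the test above: only the relative ordering of the sizes
// matters, so it holds for any v1 catalog.
val onePartSize = getTableSize(t)
assert(0 < onePartSize && onePartSize < twoPartSize)
```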