This is an automated email from the ASF dual-hosted git repository.
lzljs3620320 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/paimon.git
The following commit(s) were added to refs/heads/master by this push:
new 1f395422a0 [core] Add UT for expire partitions with hms config metastore.partitioned-table (#4875)
1f395422a0 is described below
commit 1f395422a029c5600089ba67220e91e190439ca8
Author: xuzifu666 <[email protected]>
AuthorDate: Thu Jan 9 19:23:21 2025 +0800
[core] Add UT for expire partitions with hms config metastore.partitioned-table (#4875)
---
.../spark/sql/DDLWithHiveCatalogTestBase.scala | 48 ++++++++++++++++++++++
1 file changed, 48 insertions(+)
diff --git a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/DDLWithHiveCatalogTestBase.scala b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/DDLWithHiveCatalogTestBase.scala
index b90fe86549..0c3db9a20d 100644
--- a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/DDLWithHiveCatalogTestBase.scala
+++ b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/DDLWithHiveCatalogTestBase.scala
@@ -134,6 +134,54 @@ abstract class DDLWithHiveCatalogTestBase extends PaimonHiveTestBase {
}
}
+ test(
+    "Paimon partition expire test with hive catalog: expire partition for paimon table sparkCatalogName") {
+ spark.sql(s"USE $paimonHiveCatalogName")
+ withTempDir {
+ dBLocation =>
+ withDatabase("paimon_db") {
+          spark.sql(s"CREATE DATABASE paimon_db LOCATION '${dBLocation.getCanonicalPath}'")
+ withTable("paimon_db.paimon_tbl") {
+ spark.sql(s"""
+                         |CREATE TABLE paimon_db.paimon_tbl (id STRING, name STRING, pt STRING)
+ |USING PAIMON
+ |PARTITIONED BY (pt)
+                         |TBLPROPERTIES('metastore.partitioned-table' = 'false')
+ |""".stripMargin)
+            spark.sql("insert into paimon_db.paimon_tbl select '1', 'n', '2024-11-01'")
+            spark.sql("insert into paimon_db.paimon_tbl select '2', 'n', '9999-11-01'")
+
+ spark.sql(
+              "CALL paimon_hive.sys.expire_partitions(table => 'paimon_db.paimon_tbl', expiration_time => '1 d', timestamp_formatter => 'yyyy-MM-dd')")
+
+ checkAnswer(
+ spark.sql("SELECT * FROM paimon_db.paimon_tbl"),
+ Row("2", "n", "9999-11-01") :: Nil)
+ }
+
+ withTable("paimon_db.paimon_tbl2") {
+ spark.sql(s"""
+                         |CREATE TABLE paimon_db.paimon_tbl2 (id STRING, name STRING, pt STRING)
+ |USING PAIMON
+ |PARTITIONED BY (pt)
+ |TBLPROPERTIES('metastore.partitioned-table' = 'true')
+ |""".stripMargin)
+            spark.sql("insert into paimon_db.paimon_tbl2 select '1', 'n', '2024-11-01'")
+
+            spark.sql("insert into paimon_db.paimon_tbl2 select '2', 'n', '9999-11-01'")
+
+ spark.sql(
+              "CALL paimon_hive.sys.expire_partitions(table => 'paimon_db.paimon_tbl2', expiration_time => '1 d', timestamp_formatter => 'yyyy-MM-dd')")
+
+ checkAnswer(
+ spark.sql("SELECT * FROM paimon_db.paimon_tbl2"),
+ Row("2", "n", "9999-11-01") :: Nil)
+ }
+
+ }
+ }
+ }
+
test("Paimon DDL with hive catalog: create partition for paimon table sparkCatalogName") {
Seq(paimonHiveCatalogName).foreach {
catalogName =>