This is an automated email from the ASF dual-hosted git repository.

lzljs3620320 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/paimon.git


The following commit(s) were added to refs/heads/master by this push:
     new daaca523cf [test] Add format end to Spark migrate file test
daaca523cf is described below

commit daaca523cf97de33c188ecbf9830d3ec8e4957bc
Author: JingsongLi <jingsongl...@gmail.com>
AuthorDate: Mon Mar 17 23:10:55 2025 +0800

    [test] Add format end to Spark migrate file test
---
 .../spark/procedure/MigrateFileProcedureTest.scala | 84 ++++++++++++----------
 1 file changed, 47 insertions(+), 37 deletions(-)

diff --git a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/MigrateFileProcedureTest.scala b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/MigrateFileProcedureTest.scala
index 492350a0f7..0121fd6c39 100644
--- a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/MigrateFileProcedureTest.scala
+++ b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/MigrateFileProcedureTest.scala
@@ -31,29 +31,31 @@ class MigrateFileProcedureTest extends PaimonHiveTestBase {
   Seq("parquet", "orc", "avro").foreach(
     format => {
       test(s"Paimon migrate file procedure: migrate $format non-partitioned table") {
-        withTable(s"hive_tbl1$random", s"paimon_tbl1$random") {
+        withTable(s"hive_tbl1$random$format", s"paimon_tbl1$random$format") {
           // create hive table
           spark.sql(s"""
-                       |CREATE TABLE hive_tbl1$random (id STRING, name STRING, pt STRING)
+                       |CREATE TABLE hive_tbl1$random$format (id STRING, name STRING, pt STRING)
                        |USING $format
                        |""".stripMargin)
 
-          spark.sql(s"INSERT INTO hive_tbl1$random VALUES ('1', 'a', 'p1'), ('2', 'b', 'p2')")
+          spark.sql(
+            s"INSERT INTO hive_tbl1$random$format VALUES ('1', 'a', 'p1'), ('2', 'b', 'p2')")
 
           // create paimon table
           spark.sql(s"""
-                       |CREATE TABLE paimon_tbl1$random (id STRING, name STRING, pt STRING)
+                       |CREATE TABLE paimon_tbl1$random$format (id STRING, name STRING, pt STRING)
                        |USING PAIMON
                        |TBLPROPERTIES ('file.format'='$format', 'bucket'='-1')
                        |""".stripMargin)
 
-          spark.sql(s"INSERT INTO paimon_tbl1$random VALUES ('3', 'c', 'p1'), ('4', 'd', 'p2')")
+          spark.sql(
+            s"INSERT INTO paimon_tbl1$random$format VALUES ('3', 'c', 'p1'), ('4', 'd', 'p2')")
 
           spark.sql(
-            s"CALL sys.migrate_table(source_type => 'hive', table => '$hiveDbName.hive_tbl1$random', target_table => '$hiveDbName.paimon_tbl1$random')")
+            s"CALL sys.migrate_table(source_type => 'hive', table => '$hiveDbName.hive_tbl1$random$format', target_table => '$hiveDbName.paimon_tbl1$random$format')")
 
           checkAnswer(
-            spark.sql(s"SELECT * FROM paimon_tbl1$random ORDER BY id"),
+            spark.sql(s"SELECT * FROM paimon_tbl1$random$format ORDER BY id"),
             Row("1", "a", "p1") :: Row("2", "b", "p2") :: Row("3", "c", "p1") :: Row(
               "4",
               "d",
@@ -66,29 +68,31 @@ class MigrateFileProcedureTest extends PaimonHiveTestBase {
     format => {
       test(
         s"Paimon migrate file procedure: migrate $format non-partitioned table with parallelism") {
-        withTable(s"hive_tbl_02$random", s"paimon_tbl_02$random") {
+        withTable(s"hive_tbl_02$random$format", s"paimon_tbl_02$random$format") {
           // create hive table
           spark.sql(s"""
-                       |CREATE TABLE hive_tbl_02$random (id STRING, name STRING, pt STRING)
+                       |CREATE TABLE hive_tbl_02$random$format (id STRING, name STRING, pt STRING)
                        |USING $format
                        |""".stripMargin)
 
-          spark.sql(s"INSERT INTO hive_tbl_02$random VALUES ('1', 'a', 'p1'), ('2', 'b', 'p2')")
+          spark.sql(
+            s"INSERT INTO hive_tbl_02$random$format VALUES ('1', 'a', 'p1'), ('2', 'b', 'p2')")
 
           // create paimon table
           spark.sql(s"""
-                       |CREATE TABLE paimon_tbl_02$random (id STRING, name STRING, pt STRING)
+                       |CREATE TABLE paimon_tbl_02$random$format (id STRING, name STRING, pt STRING)
                        |USING PAIMON
                        |TBLPROPERTIES ('file.format'='$format', 'bucket'='-1')
                        |""".stripMargin)
 
-          spark.sql(s"INSERT INTO paimon_tbl_02$random VALUES ('3', 'c', 'p1'), ('4', 'd', 'p2')")
+          spark.sql(
+            s"INSERT INTO paimon_tbl_02$random$format VALUES ('3', 'c', 'p1'), ('4', 'd', 'p2')")
 
           spark.sql(
-            s"CALL sys.migrate_table(source_type => 'hive', table => '$hiveDbName.hive_tbl_02$random', target_table => '$hiveDbName.paimon_tbl_02$random', parallelism => 6)")
+            s"CALL sys.migrate_table(source_type => 'hive', table => '$hiveDbName.hive_tbl_02$random$format', target_table => '$hiveDbName.paimon_tbl_02$random$format', parallelism => 6)")
 
           checkAnswer(
-            spark.sql(s"SELECT * FROM paimon_tbl_02$random ORDER BY id"),
+            spark.sql(s"SELECT * FROM paimon_tbl_02$random$format ORDER BY id"),
             Row("1", "a", "p1") :: Row("2", "b", "p2") :: Row("3", "c", "p1") :: Row(
               "4",
               "d",
@@ -101,31 +105,33 @@ class MigrateFileProcedureTest extends PaimonHiveTestBase {
     format => {
       test(
         s"Paimon migrate file procedure: migrate $format non-partitioned table with delete source table") {
-        withTable(s"hive_tbl3$random", s"paimon_tbl3$random") {
+        withTable(s"hive_tbl3$random$format", s"paimon_tbl3$random$format") {
           // create hive table
           spark.sql(s"""
-                       |CREATE TABLE hive_tbl3$random (id STRING, name STRING, pt STRING)
+                       |CREATE TABLE hive_tbl3$random$format (id STRING, name STRING, pt STRING)
                        |USING $format
                        |""".stripMargin)
 
-          spark.sql(s"INSERT INTO hive_tbl3$random VALUES ('1', 'a', 'p1'), ('2', 'b', 'p2')")
+          spark.sql(
+            s"INSERT INTO hive_tbl3$random$format VALUES ('1', 'a', 'p1'), ('2', 'b', 'p2')")
 
           // create paimon table
           spark.sql(s"""
-                       |CREATE TABLE paimon_tbl3$random (id STRING, name STRING, pt STRING)
+                       |CREATE TABLE paimon_tbl3$random$format (id STRING, name STRING, pt STRING)
                        |USING PAIMON
                        |TBLPROPERTIES ('file.format'='$format', 'bucket'='-1')
                        |""".stripMargin)
 
-          spark.sql(s"INSERT INTO paimon_tbl3$random VALUES ('3', 'c', 'p1'), ('4', 'd', 'p2')")
+          spark.sql(
+            s"INSERT INTO paimon_tbl3$random$format VALUES ('3', 'c', 'p1'), ('4', 'd', 'p2')")
 
           spark.sql(
-            s"CALL sys.migrate_table(source_type => 'hive', table => '$hiveDbName.hive_tbl3$random', target_table => '$hiveDbName.paimon_tbl3$random', delete_origin => false)")
+            s"CALL sys.migrate_table(source_type => 'hive', table => '$hiveDbName.hive_tbl3$random$format', target_table => '$hiveDbName.paimon_tbl3$random$format', delete_origin => false)")
 
-          checkAnswer(spark.sql(s"SELECT * FROM hive_tbl3$random ORDER BY id"), Nil)
+          checkAnswer(spark.sql(s"SELECT * FROM hive_tbl3$random$format ORDER BY id"), Nil)
 
           checkAnswer(
-            spark.sql(s"SELECT * FROM paimon_tbl3$random ORDER BY id"),
+            spark.sql(s"SELECT * FROM paimon_tbl3$random$format ORDER BY id"),
             Row("1", "a", "p1") :: Row("2", "b", "p2") :: Row("3", "c", "p1") :: Row(
               "4",
               "d",
@@ -137,31 +143,33 @@ class MigrateFileProcedureTest extends PaimonHiveTestBase {
   Seq("parquet", "orc", "avro").foreach(
     format => {
       test(s"Paimon migrate file procedure: migrate $format partitioned table") {
-        withTable(s"hive_tbl4$random", s"paimon_tbl4$random") {
+        withTable(s"hive_tbl4$random$format", s"paimon_tbl4$random$format") {
           // create hive table
           spark.sql(s"""
-                       |CREATE TABLE hive_tbl4$random (id STRING, name STRING, pt STRING)
+                       |CREATE TABLE hive_tbl4$random$format (id STRING, name STRING, pt STRING)
                        |USING $format
                        |PARTITIONED BY (pt)
                        |""".stripMargin)
 
-          spark.sql(s"INSERT INTO hive_tbl4$random VALUES ('1', 'a', 'p1'), ('2', 'b', 'p2')")
+          spark.sql(
+            s"INSERT INTO hive_tbl4$random$format VALUES ('1', 'a', 'p1'), ('2', 'b', 'p2')")
 
           // create paimon table
           spark.sql(s"""
-                       |CREATE TABLE paimon_tbl4$random (id STRING, name STRING, pt STRING)
+                       |CREATE TABLE paimon_tbl4$random$format (id STRING, name STRING, pt STRING)
                        |USING PAIMON
                        |TBLPROPERTIES ('file.format'='$format', 'bucket'='-1')
                        |PARTITIONED BY (pt)
                        |""".stripMargin)
 
-          spark.sql(s"INSERT INTO paimon_tbl4$random VALUES ('3', 'c', 'p1'), ('4', 'd', 'p2')")
+          spark.sql(
+            s"INSERT INTO paimon_tbl4$random$format VALUES ('3', 'c', 'p1'), ('4', 'd', 'p2')")
 
           spark.sql(
-            s"CALL sys.migrate_table(source_type => 'hive', table => '$hiveDbName.hive_tbl4$random', target_table => '$hiveDbName.paimon_tbl4$random')")
+            s"CALL sys.migrate_table(source_type => 'hive', table => '$hiveDbName.hive_tbl4$random$format', target_table => '$hiveDbName.paimon_tbl4$random$format')")
 
           checkAnswer(
-            spark.sql(s"SELECT * FROM paimon_tbl4$random ORDER BY id"),
+            spark.sql(s"SELECT * FROM paimon_tbl4$random$format ORDER BY id"),
             Row("1", "a", "p1") :: Row("2", "b", "p2") :: Row("3", "c", "p1") :: Row(
               "4",
               "d",
@@ -174,37 +182,39 @@ class MigrateFileProcedureTest extends PaimonHiveTestBase {
     format => {
       test(
         s"Paimon migrate file procedure: migrate $format partitioned table with delete source table") {
-        withTable(s"hive_tbl5$random", s"paimon_tbl5$random") {
+        withTable(s"hive_tbl5$random$format", s"paimon_tbl5$random$format") {
           // create hive table
           spark.sql(s"""
-                       |CREATE TABLE hive_tbl5$random (id STRING, name STRING, pt STRING)
+                       |CREATE TABLE hive_tbl5$random$format (id STRING, name STRING, pt STRING)
                        |USING $format
                        |PARTITIONED BY (pt)
                        |""".stripMargin)
 
-          spark.sql(s"INSERT INTO hive_tbl5$random VALUES ('1', 'a', 'p1'), ('2', 'b', 'p2')")
+          spark.sql(
+            s"INSERT INTO hive_tbl5$random$format VALUES ('1', 'a', 'p1'), ('2', 'b', 'p2')")
 
           // create paimon table
           spark.sql(s"""
-                       |CREATE TABLE paimon_tbl5$random (id STRING, name STRING, pt STRING)
+                       |CREATE TABLE paimon_tbl5$random$format (id STRING, name STRING, pt STRING)
                        |USING PAIMON
                        |TBLPROPERTIES ('file.format'='$format', 'bucket'='-1')
                        |PARTITIONED BY (pt)
                        |""".stripMargin)
 
-          spark.sql(s"INSERT INTO paimon_tbl5$random VALUES ('3', 'c', 'p1'), ('4', 'd', 'p2')")
+          spark.sql(
+            s"INSERT INTO paimon_tbl5$random$format VALUES ('3', 'c', 'p1'), ('4', 'd', 'p2')")
 
           spark.sql(
-            s"CALL sys.migrate_table(source_type => 'hive', table => '$hiveDbName.hive_tbl5$random', target_table => '$hiveDbName.paimon_tbl5$random', delete_origin => false)")
+            s"CALL sys.migrate_table(source_type => 'hive', table => '$hiveDbName.hive_tbl5$random$format', target_table => '$hiveDbName.paimon_tbl5$random$format', delete_origin => false)")
 
           checkAnswer(
-            spark.sql(s"SELECT * FROM paimon_tbl5$random ORDER BY id"),
+            spark.sql(s"SELECT * FROM paimon_tbl5$random$format ORDER BY id"),
             Row("1", "a", "p1") :: Row("2", "b", "p2") :: Row("3", "c", "p1") :: Row(
               "4",
               "d",
               "p2") :: Nil)
 
-          checkAnswer(spark.sql(s"SELECT * FROM hive_tbl5$random ORDER BY id"), Nil)
+          checkAnswer(spark.sql(s"SELECT * FROM hive_tbl5$random$format ORDER BY id"), Nil)
         }
       }
     })
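
For reference, the change appends the format name to every table identifier so that the three per-format instantiations of each test no longer share Hive or Paimon tables. Below is a minimal standalone sketch of that naming scheme, not part of the patch; the object name and the random suffix are illustrative stand-ins for the suffix the real test derives from its test base.

    // Hypothetical sketch: suffixing table names with the format keeps
    // per-format test runs isolated; names here are illustrative only.
    object FormatSuffixSketch {
      def main(args: Array[String]): Unit = {
        // Stand-in for the random suffix MigrateFileProcedureTest appends to table names.
        val random = scala.util.Random.nextInt(10000).toString
        Seq("parquet", "orc", "avro").foreach { format =>
          // With the format appended, each iteration gets its own table pair.
          val hiveTable = s"hive_tbl1$random$format"     // e.g. hive_tbl17351parquet
          val paimonTable = s"paimon_tbl1$random$format" // e.g. paimon_tbl17351parquet
          println(s"would migrate $hiveTable -> $paimonTable")
        }
      }
    }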
