zzcclp commented on code in PR #5258:
URL: https://github.com/apache/incubator-gluten/pull/5258#discussion_r1547520685


##########
backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTableAfterRestart.scala:
##########
@@ -182,43 +187,180 @@ class GlutenClickHouseTableAfterRestart
       assert(stats2.missCount() - oldMissingCount2 == 0)
     }
 
+    val oldMissingCount1 = 
ClickhouseSnapshot.deltaScanCache.stats().missCount()
+    val oldMissingCount2 = 
ClickhouseSnapshot.addFileToAddMTPCache.stats().missCount()
+
+    restartSpark()
+
+    runTPCHQueryBySQL(1, sqlStr)(_ => {})
+
+    // after restart, additionally check stats of delta scan cache
+    val stats1 = ClickhouseSnapshot.deltaScanCache.stats()
+    assert(stats1.missCount() - oldMissingCount1 == 1)
+    val stats2 = ClickhouseSnapshot.addFileToAddMTPCache.stats()
+    assert(stats2.missCount() - oldMissingCount2 == 6)
+
+  }
+
+  test("test optimize after restart") {
+    spark.sql(s"""
+                 |DROP TABLE IF EXISTS table_restart_optimize;
+                 |""".stripMargin)
+
+    spark.sql(s"""
+                 |CREATE TABLE IF NOT EXISTS table_restart_optimize (id 
bigint,  name string)
+                 |USING clickhouse
+                 |LOCATION '$basePath/table_restart_optimize'
+                 |""".stripMargin)
+
+    spark.sql(s"""
+                 | insert into table table_restart_optimize values (1,"tom"), 
(2, "jim")
+                 |""".stripMargin)
+    // second file
+    spark.sql(s"""
+                 | insert into table table_restart_optimize values (1,"tom"), 
(2, "jim")
+                 |""".stripMargin)
+
+    restartSpark()
+
+    spark.sql("optimize table_restart_optimize")
+    assert(spark.sql("select count(*) from 
table_restart_optimize").collect().apply(0).get(0) == 4)
+  }
+
+  test("test vacuum after restart") {
+    spark.sql(s"""
+                 |DROP TABLE IF EXISTS table_restart_vacuum;
+                 |""".stripMargin)
+
+    spark.sql(s"""
+                 |CREATE TABLE IF NOT EXISTS table_restart_vacuum (id bigint,  
name string)
+                 |USING clickhouse
+                 |LOCATION '$basePath/table_restart_vacuum'
+                 |""".stripMargin)
+
+    spark.sql(s"""
+                 | insert into table table_restart_vacuum values (1,"tom"), 
(2, "jim")
+                 |""".stripMargin)
+    // second file
+    spark.sql(s"""
+                 | insert into table table_restart_vacuum values (1,"tom"), 
(2, "jim")
+                 |""".stripMargin)
+
+    spark.sql("optimize table_restart_vacuum")
+
+    restartSpark()
+
+    spark.sql("set spark.gluten.enabled=false")
+    spark.sql("vacuum table_restart_vacuum")
+    spark.sql("set spark.gluten.enabled=true")
+
+    assert(spark.sql("select count(*) from 
table_restart_vacuum").collect().apply(0).get(0) == 4)
+  }
+
+  test("test update after restart") {
+    spark.sql(s"""
+                 |DROP TABLE IF EXISTS table_restart_update;
+                 |""".stripMargin)
+
+    spark.sql(s"""
+                 |CREATE TABLE IF NOT EXISTS table_restart_update (id bigint,  
name string)
+                 |USING clickhouse
+                 |LOCATION '$basePath/table_restart_update'
+                 |""".stripMargin)
+
+    spark.sql(s"""
+                 | insert into table table_restart_update values (1,"tom"), 
(2, "jim")
+                 |""".stripMargin)
+    // second file
+    spark.sql(s"""
+                 | insert into table table_restart_update values (1,"tom"), 
(2, "jim")
+                 |""".stripMargin)
+
+    restartSpark()
+
+    spark.sql("update table_restart_update set name = 'tom' where id = 1")
+
+    assert(spark.sql("select count(*) from 
table_restart_update").collect().apply(0).get(0) == 4)
+  }
+
+  test("test delete after restart") {
+    spark.sql(s"""
+                 |DROP TABLE IF EXISTS table_restart_delete;
+                 |""".stripMargin)
+
+    spark.sql(s"""
+                 |CREATE TABLE IF NOT EXISTS table_restart_delete (id bigint,  
name string)
+                 |USING clickhouse
+                 |LOCATION '$basePath/table_restart_delete'
+                 |""".stripMargin)
+
+    spark.sql(s"""
+                 | insert into table table_restart_delete values (1,"tom"), 
(2, "jim")
+                 |""".stripMargin)
+    // second file
+    spark.sql(s"""
+                 | insert into table table_restart_delete values (1,"tom"), 
(2, "jim")
+                 |""".stripMargin)
+
+    restartSpark()
+
+    spark.sql("delete from table_restart_delete where id = 1")
+
+    assert(spark.sql("select count(*) from 
table_restart_delete").collect().apply(0).get(0) == 2)
+  }
+
+  test("test drop after restart") {
+    spark.sql(s"""
+                 |DROP TABLE IF EXISTS table_restart_drop;
+                 |""".stripMargin)
+
+    spark.sql(s"""
+                 |CREATE TABLE IF NOT EXISTS table_restart_drop (id bigint,  
name string)
+                 |USING clickhouse
+                 |LOCATION '$basePath/table_restart_drop'
+                 |""".stripMargin)
+
+    spark.sql(s"""
+                 | insert into table table_restart_drop values (1,"tom"), (2, 
"jim")
+                 |""".stripMargin)
+    // second file
+    spark.sql(s"""
+                 | insert into table table_restart_drop values (1,"tom"), (2, 
"jim")
+                 |""".stripMargin)
+
+    restartSpark()
+
+    spark.sql("drop table table_restart_drop")

Review Comment:
   drop table



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to