stream2000 commented on code in PR #8983:
URL: https://github.com/apache/hudi/pull/8983#discussion_r1233466039
##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/TestInsertTable.scala:
##########
@@ -1077,56 +1077,59 @@ class TestInsertTable extends HoodieSparkSqlTestBase {
test("Test Bulk Insert Into Bucket Index Table") {
withSQLConf("hoodie.datasource.write.operation" -> "bulk_insert") {
- withTempDir { tmp =>
- val tableName = generateTableName
- // Create a partitioned table
- spark.sql(
- s"""
- |create table $tableName (
- | id int,
- | dt string,
- | name string,
- | price double,
- | ts long
- |) using hudi
- | tblproperties (
- | primaryKey = 'id,name',
- | preCombineField = 'ts',
- | hoodie.index.type = 'BUCKET',
- | hoodie.bucket.index.hash.field = 'id,name')
- | partitioned by (dt)
- | location '${tmp.getCanonicalPath}'
- """.stripMargin)
+ Seq("mor", "cow").foreach { tableType =>
+ withTempDir { tmp =>
+ val tableName = generateTableName
+ // Create a partitioned table
+ spark.sql(
+ s"""
+ |create table $tableName (
+ | id int,
+ | dt string,
+ | name string,
+ | price double,
+ | ts long
+ |) using hudi
+ | tblproperties (
+ | primaryKey = 'id,name',
+ | type = '$tableType',
+ | preCombineField = 'ts',
Review Comment:
You are right. The test here is actually testing `bulk insert as row` for
the bucket index. And if we set `hoodie.datasource.write.row.writer.enable` to
false, the logic I added will be tested.
I will add another PR to test the combinations of
cow/mor/bulk_insert/bulk_insert_as_row.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]