amrishlal commented on code in PR #9169:
URL: https://github.com/apache/hudi/pull/9169#discussion_r1260152774


##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/TestInsertTable.scala:
##########
@@ -1191,88 +1191,68 @@ class TestInsertTable extends HoodieSparkSqlTestBase {
     }
   }
 
-  test("Test Bulk Insert Into Bucket Index Table") {
-    withSQLConf("hoodie.datasource.write.operation" -> "bulk_insert", 
"hoodie.bulkinsert.shuffle.parallelism" -> "1") {
-      Seq("mor", "cow").foreach { tableType =>
-        Seq("true", "false").foreach { bulkInsertAsRow =>
-          withTempDir { tmp =>
-            val tableName = generateTableName
-            // Create a partitioned table
-            spark.sql(
-              s"""
-                 |create table $tableName (
-                 |  id int,
-                 |  dt string,
-                 |  name string,
-                 |  price double,
-                 |  ts long
-                 |) using hudi
-                 | tblproperties (
-                 | primaryKey = 'id,name',
-                 | type = '$tableType',
-                 | preCombineField = 'ts',
-                 | hoodie.index.type = 'BUCKET',
-                 | hoodie.bucket.index.hash.field = 'id,name',
-                 | hoodie.datasource.write.row.writer.enable = '$bulkInsertAsRow')
-                 | partitioned by (dt)
-                 | location '${tmp.getCanonicalPath}'
-                 """.stripMargin)
-
-            // Note: Do not write the field alias; the partition field must be placed last.
-            spark.sql(
-              s"""
-                 | insert into $tableName values
-                 | (1, 'a1,1', 10, 1000, "2021-01-05"),
-                 | (2, 'a2', 20, 2000, "2021-01-06"),
-                 | (3, 'a3,3', 30, 3000, "2021-01-07")
-                 """.stripMargin)
-
-            checkAnswer(s"select id, name, price, ts, dt from $tableName")(
-              Seq(1, "a1,1", 10.0, 1000, "2021-01-05"),
-              Seq(2, "a2", 20.0, 2000, "2021-01-06"),
-              Seq(3, "a3,3", 30.0, 3000, "2021-01-07")
-            )
-
-            spark.sql(
-              s"""
-                 | insert into $tableName values
-                 | (1, 'a1', 10, 1000, "2021-01-05"),
-                 | (3, "a3", 30, 3000, "2021-01-07")
-               """.stripMargin)
-
-            checkAnswer(s"select id, name, price, ts, dt from $tableName")(
-              Seq(1, "a1,1", 10.0, 1000, "2021-01-05"),
-              Seq(1, "a1", 10.0, 1000, "2021-01-05"),
-              Seq(2, "a2", 20.0, 2000, "2021-01-06"),
-              Seq(3, "a3,3", 30.0, 3000, "2021-01-07"),
-              Seq(3, "a3", 30.0, 3000, "2021-01-07")
-            )
-
-            // there are two files in partition(dt = '2021-01-05')
-            checkAnswer(s"select count(distinct _hoodie_file_name) from 
$tableName where dt = '2021-01-05'")(
-              Seq(2)
-            )
-
-            // would generate 6 other files in partition(dt = '2021-01-05')
-            spark.sql(
-              s"""
-                 | insert into $tableName values
-                 | (4, 'a1,1', 10, 1000, "2021-01-05"),
-                 | (5, 'a1,1', 10, 1000, "2021-01-05"),
-                 | (6, 'a1,1', 10, 1000, "2021-01-05"),
-                 | (7, 'a1,1', 10, 1000, "2021-01-05"),
-                 | (8, 'a1,1', 10, 1000, "2021-01-05"),
-                 | (9, 'a3,3', 30, 3000, "2021-01-05")
-               """.stripMargin)
-
-            checkAnswer(s"select count(distinct _hoodie_file_name) from 
$tableName where dt = '2021-01-05'")(
-              Seq(8)
-            )
-          }
-        }
-      }
-    }
-  }
+  // FAILING TEST CASE. DO NOT UNCOMMENT BEFORE FIXING THE TEST CASE (see https://github.com/apache/hudi/pull/9156)

Review Comment:
   Fixed using the following syntax/structure:
   
   ```scala
     /** Ignore failing test case (see HUDI-6521 for more details) */
     ignore("Test Bulk Insert Into Bucket Index Table") {
       ...
     }
   ```
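
   For context, here is a minimal sketch of how `ignore` behaves in ScalaTest's `FunSuite`-style DSL (the suite below is hypothetical and extends `AnyFunSuite` directly; `HoodieSparkSqlTestBase` exposes the same `test`/`ignore` API, as the snippet above shows). Swapping `test` for `ignore` keeps the body compiled but skips execution:
   
   ```scala
   import org.scalatest.funsuite.AnyFunSuite
   
   // Hypothetical suite for illustration only; not part of the Hudi codebase.
   class IgnoreExampleSuite extends AnyFunSuite {
   
     test("a passing test that still runs") {
       assert(1 + 1 == 2)
     }
   
     /** Ignore failing test case (see HUDI-6521 for more details) */
     ignore("a known-failing test, skipped but still compiled") {
       // Never executed while ignored, but still type-checked,
       // so it cannot silently rot the way commented-out code can.
       assert(1 + 1 == 3)
     }
   }
   ```
   
   Unlike commenting the test out, the runner still reports the case as ignored, so it stays visible until HUDI-6521 is resolved.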


