yihua commented on a change in pull request #4630:
URL: https://github.com/apache/hudi/pull/4630#discussion_r789246484



##########
File path: 
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/TestHoodieFileIndex.scala
##########
@@ -132,14 +142,52 @@ class TestHoodieFileIndex extends HoodieClientTestBase {
     assertEquals("partition", 
fileIndex.partitionSchema.fields.map(_.name).mkString(","))
   }
 
+  @Test
+  def testPartitionSchemaWithoutKeyGenerator(): Unit = {
+    val metaClient = HoodieTestUtils.init(
+      hadoopConf, basePath, HoodieTableType.COPY_ON_WRITE, 
HoodieTableMetaClient.withPropertyBuilder()
+        .fromMetaClient(this.metaClient)
+        .setRecordKeyFields("_row_key")
+        .setPartitionFields("partition_path")
+        .setTableName("hoodie_test").build())
+    val props = Map(
+      "hoodie.insert.shuffle.parallelism" -> "4",
+      "hoodie.upsert.shuffle.parallelism" -> "4",
+      DataSourceWriteOptions.RECORDKEY_FIELD.key -> "_row_key",
+      DataSourceWriteOptions.PARTITIONPATH_FIELD.key -> "partition_path",
+      DataSourceWriteOptions.PRECOMBINE_FIELD.key -> "timestamp",
+      HoodieWriteConfig.TBL_NAME.key -> "hoodie_test",
+      DataSourceWriteOptions.OPERATION.key -> 
DataSourceWriteOptions.INSERT_OPERATION_OPT_VAL
+    )
+    val writeConfig = HoodieWriteConfig.newBuilder()
+      .withEngineType(EngineType.JAVA)
+      .withPath(basePath)
+      .withSchema(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA)
+      .withProps(props)
+      .build()
+    val context = new HoodieJavaEngineContext(new Configuration())
+    val writeClient = new HoodieJavaWriteClient(context, writeConfig)

Review comment:
       This tests using the Spark datasource to read a Hudi table written by 
the Java/Kafka write client, to make sure there is no gap between the two.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to