danny0405 commented on a change in pull request #3171:
URL: https://github.com/apache/hudi/pull/3171#discussion_r660234371



##########
File path: 
hudi-flink/src/test/java/org/apache/hudi/table/HoodieDataSourceITCase.java
##########
@@ -627,4 +640,39 @@ private void execInsertSql(TableEnvironment tEnv, String 
insert) {
         .flatMap(Collection::stream)
         .collect(Collectors.toList());
   }
+
+  @ParameterizedTest
+  @EnumSource(value = ExecMode.class)
+  void testTableProperties(ExecMode execMode) {
+    boolean streaming = execMode == ExecMode.STREAM;
+    String hoodieTableDDL = "create table t1(\n"
+            + "  uuid varchar(20),\n"
+            + "  name varchar(10),\n"
+            + "  age int,\n"
+            + "  `partition` varchar(20),\n" // test streaming read with 
partition field in the middle
+            + "  ts timestamp(3),\n"
+            + "  PRIMARY KEY(uuid) NOT ENFORCED\n"
+            + ")\n"
+            + "PARTITIONED BY (`partition`)\n"
+            + "with (\n"
+            + "  'connector' = 'hudi',\n"
+            + "  'path' = '" + tempFile.getAbsolutePath() + "',\n"
+            + "  'read.streaming.enabled' = '" + streaming + "'\n"
+            + ")";
+    streamTableEnv.executeSql(hoodieTableDDL);
+    final String insertInto1 = "insert into t1 values\n"
+            + "('id1','Danny',23, 'par1', TIMESTAMP '1970-01-01 00:00:01')";
+    execInsertSql(streamTableEnv, insertInto1);
+    
+    // Validate the partition fields & preCombineField in hoodie.properties.
+    HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder()

Review comment:
       How about we add a new class `TestStreamerUtil` with a test case for 
`StreamerUtil#initTableIfNotExists`? There is no need to add an ITCase here.

##########
File path: hudi-flink/src/main/java/org/apache/hudi/util/StreamerUtil.java
##########
@@ -205,10 +205,14 @@ public static void initTableIfNotExists(Configuration 
conf) throws IOException {
     // Hadoop FileSystem
     FileSystem fs = FSUtils.getFs(basePath, hadoopConf);
     if (!fs.exists(new Path(basePath, HoodieTableMetaClient.METAFOLDER_NAME))) 
{
+      String partitionField = 
conf.getString(FlinkOptions.PARTITION_PATH_FIELD, null);
+      String preCombineField = conf.getString(FlinkOptions.PRECOMBINE_FIELD);

Review comment:
       Okay, then just inline these two variables, just like the others.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to