danny0405 commented on a change in pull request #2808:
URL: https://github.com/apache/hudi/pull/2808#discussion_r612072393



##########
File path: hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/HoodieFlinkWriteClient.java
##########
@@ -162,17 +162,54 @@ public void bootstrap(Option<Map<String, String>> extraMetadata) {
     table.validateUpsertSchema();
     preWrite(instantTime, WriteOperationType.INSERT, table.getMetaClient());
     // create the write handle if not exists
-    final HoodieRecord<T> record = records.get(0);
-    final boolean isDelta = table.getMetaClient().getTableType().equals(HoodieTableType.MERGE_ON_READ);
-    final HoodieWriteHandle<?, ?, ?, ?> writeHandle = getOrCreateWriteHandle(record, isDelta, getConfig(),
-        instantTime, table, record.getPartitionPath(), records.listIterator());
+    final HoodieWriteHandle<?, ?, ?, ?> writeHandle = getOrCreateWriteHandle(records.get(0), getConfig(),
+        instantTime, table, records.listIterator());
     HoodieWriteMetadata<List<WriteStatus>> result = ((HoodieFlinkTable<T>) table).insert(context, writeHandle, instantTime, records);
     if (result.getIndexLookupDuration().isPresent()) {
       metrics.updateIndexMetrics(LOOKUP_STR, result.getIndexLookupDuration().get().toMillis());
     }
     return postWrite(result, instantTime, table);
   }
 
+  /**
+   * Removes all existing records from the affected partitions and inserts the given HoodieRecords into the table.
+   *
+   * @param records HoodieRecords to insert
+   * @param instantTime Instant time of the commit
+   * @return list of WriteStatus to inspect errors and counts
+   */
+  public List<WriteStatus> insertOverwrite(
+      List<HoodieRecord<T>> records, final String instantTime) {
+    HoodieTable<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>> 
table =
+        getTableAndInitCtx(WriteOperationType.INSERT_OVERWRITE, instantTime);
+    table.validateInsertSchema();
+    preWrite(instantTime, WriteOperationType.INSERT_OVERWRITE, table.getMetaClient());
+    // create the write handle if not exists
+    final HoodieWriteHandle<?, ?, ?, ?> writeHandle = getOrCreateWriteHandle(records.get(0), getConfig(),
+        instantTime, table, records.listIterator());
+    HoodieWriteMetadata result = ((HoodieFlinkTable<T>) table).insertOverwrite(context, writeHandle, instantTime, records);
+    return postWrite(result, instantTime, table);
+  }
+
+  /**
+   * Removes all existing records of the Hoodie table and inserts the given HoodieRecords into the table.
+   *
+   * @param records HoodieRecords to insert
+   * @param instantTime Instant time of the commit
+   * @return list of WriteStatus to inspect errors and counts
+   */
+  public List<WriteStatus> insertOverwriteTable(

Review comment:
       No, because it has a different method signature from `SparkRDDWriteClient#insertOverwriteTable`.
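
       For context, the mismatch looks roughly like this (a sketch of the two signatures from memory, with modifiers elided; not a verbatim copy of either client):

       ```java
       // SparkRDDWriteClient: RDD-based input, returns a HoodieWriteResult that
       // wraps the WriteStatus RDD plus the replaced file IDs per partition.
       HoodieWriteResult insertOverwriteTable(JavaRDD<HoodieRecord<T>> records, String instantTime);

       // HoodieFlinkWriteClient (this PR): List-based input, returns the write
       // statuses directly, so the two methods cannot share one interface method.
       List<WriteStatus> insertOverwriteTable(List<HoodieRecord<T>> records, String instantTime);
       ```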

##########
File path: hudi-flink/src/main/java/org/apache/hudi/sink/partitioner/BucketAssignFunction.java
##########
@@ -190,7 +191,9 @@ public void processElement(I value, Context ctx, Collector<O> out) throws Except
         default:
           throw new AssertionError();
       }
-      this.indexState.put(hoodieKey, location);
+      if (isChangingRecords) {

Review comment:
       It was a bug: the index state should only be updated for changing records.
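
       A minimal sketch of the fixed behavior, assuming `isChangingRecords` is derived from the write operation type elsewhere in the class (names are taken from the surrounding diff; the trailing `collect` call is illustrative):

       ```java
       // Only operations that can mutate existing data (e.g. UPSERT/DELETE) need
       // the record location remembered in the keyed index state for later key
       // lookups. The old unconditional put also recorded locations on
       // pure-INSERT pipelines, growing the state for no benefit.
       if (isChangingRecords) {
         this.indexState.put(hoodieKey, location);
       }
       out.collect((O) record);
       ```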



