JingsongLi commented on code in PR #550:
URL: https://github.com/apache/flink-table-store/pull/550#discussion_r1122527375


##########
flink-table-store-core/src/main/java/org/apache/flink/table/store/table/sink/TableWrite.java:
##########
@@ -18,39 +18,42 @@
 
 package org.apache.flink.table.store.table.sink;
 
+import org.apache.flink.table.store.annotation.Experimental;
 import org.apache.flink.table.store.data.BinaryRow;
 import org.apache.flink.table.store.data.InternalRow;
 import org.apache.flink.table.store.file.disk.IOManager;
-import org.apache.flink.table.store.file.io.DataFileMeta;
-
-import java.util.List;
+import org.apache.flink.table.store.table.Table;
 
 /**
- * An abstraction layer above {@link org.apache.flink.table.store.file.operation.FileStoreWrite} to
- * provide {@link InternalRow} writing.
+ * Write of {@link Table} to provide {@link InternalRow} writing.
+ *
+ * @since 0.4.0
  */
+@Experimental
 public interface TableWrite extends AutoCloseable {
 
-    TableWrite withOverwrite(boolean overwrite);
-
+    /** With {@link IOManager}, this is needed if 'write-buffer-spillable' is set to true. */
     TableWrite withIOManager(IOManager ioManager);
 
-    SinkRecord write(InternalRow rowData) throws Exception;
+    /** Calculate which partition {@code row} belongs to. */
+    BinaryRow getPartition(InternalRow row);
 
-    /** Log record need to preserve original pk (which includes partition fields). */
-    SinkRecord toLogRecord(SinkRecord record);
+    /** Calculate which bucket {@code row} belongs to. */
+    int getBucket(InternalRow row);
 
-    void compact(BinaryRow partition, int bucket, boolean fullCompaction) throws Exception;
+    /** Write a row to the writer. */
+    void write(InternalRow row) throws Exception;
 
     /**
-     * Notify that some new files are created at given snapshot in given bucket.
+     * Compact a bucket of a partition. By default, it will determine whether to perform the
+     * compaction according to the 'num-sorted-run.compaction-trigger' option. If fullCompaction is
+     * true, it will force a full compaction, which is expensive.
      *
-     * <p>Most probably, these files are created by another job. Currently this method is only used
-     * by the dedicated compact job to see files created by writer jobs.
+     * <p>NOTE: In Java API, full compaction is not automatically executed. If you open
+     * 'changelog-producer' of 'full-compaction', please execute this method regularly to produce

Review Comment:
   set 'changelog-producer' to 'full-compaction'
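   
   To make the NOTE above concrete, here is a minimal usage sketch. It assumes a `TableWrite` has already been obtained from a `Table` (not shown in this diff) and that `compact` keeps the `(partition, bucket, fullCompaction)` signature shown above; `writeAndFullCompact` is a hypothetical helper, not part of this PR:
   
   ```java
   import org.apache.flink.table.store.data.BinaryRow;
   import org.apache.flink.table.store.data.InternalRow;
   import org.apache.flink.table.store.table.sink.TableWrite;
   
   public class TableWriteSketch {
   
       // Hypothetical helper: write one row, then force a full compaction of
       // the bucket the row landed in.
       static void writeAndFullCompact(TableWrite write, InternalRow row) throws Exception {
           // If 'write-buffer-spillable' is set to true, attach an IOManager first:
           // write = write.withIOManager(ioManager);
   
           write.write(row);
   
           // Partition and bucket are derived from the row itself.
           BinaryRow partition = write.getPartition(row);
           int bucket = write.getBucket(row);
   
           // With 'changelog-producer' set to 'full-compaction', the Java API does
           // not trigger full compaction automatically, so call this regularly.
           // fullCompaction = true forces an expensive full compaction; with false,
           // 'num-sorted-run.compaction-trigger' decides whether to compact.
           write.compact(partition, bucket, true);
       }
   }
   ```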


