nsivabalan commented on a change in pull request #4645:
URL: https://github.com/apache/hudi/pull/4645#discussion_r789365060



##########
File path: 
hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieMultiTableDeltaStreamer.java
##########
@@ -370,50 +441,124 @@ public static void main(String[] args) throws 
IOException {
   private static String resetTarget(Config configuration, String database, 
String tableName) {
     String basePathPrefix = configuration.basePathPrefix;
     basePathPrefix = basePathPrefix.charAt(basePathPrefix.length() - 1) == '/' 
? basePathPrefix.substring(0, basePathPrefix.length() - 1) : basePathPrefix;
-    String targetBasePath = basePathPrefix + Constants.FILE_DELIMITER + 
database + Constants.FILE_DELIMITER + tableName;
-    configuration.targetTableName = database + Constants.DELIMITER + tableName;
+    String targetBasePath = basePathPrefix + Constants.PATH_SEPARATOR + 
database + Constants.PATH_SEPARATOR + tableName;
+    configuration.targetTableName = database + Constants.PATH_CUR_DIR + 
tableName;
     return targetBasePath;
   }
 
   /**
    * Creates actual HoodieDeltaStreamer objects for every table/topic and does 
incremental sync.
    */
   public void sync() {
+    List<HoodieDeltaStreamer> hdsObjectList = new ArrayList<>();
+
+    // The sync function is not executed when multiple sources update the same 
target.

Review comment:
       Probably we can have a big if/else block for the single-source vs. multiple-sources-per-Hudi-table cases — that would be easier to reason about and maintain. 
   The existing code would go into the if block, and the new code for multiple 
   sources would go into the else block. 

##########
File path: 
hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieMultiTableDeltaStreamer.java
##########
@@ -370,50 +441,124 @@ public static void main(String[] args) throws 
IOException {
   private static String resetTarget(Config configuration, String database, 
String tableName) {
     String basePathPrefix = configuration.basePathPrefix;
     basePathPrefix = basePathPrefix.charAt(basePathPrefix.length() - 1) == '/' 
? basePathPrefix.substring(0, basePathPrefix.length() - 1) : basePathPrefix;
-    String targetBasePath = basePathPrefix + Constants.FILE_DELIMITER + 
database + Constants.FILE_DELIMITER + tableName;
-    configuration.targetTableName = database + Constants.DELIMITER + tableName;
+    String targetBasePath = basePathPrefix + Constants.PATH_SEPARATOR + 
database + Constants.PATH_SEPARATOR + tableName;
+    configuration.targetTableName = database + Constants.PATH_CUR_DIR + 
tableName;
     return targetBasePath;
   }
 
   /**
    * Creates actual HoodieDeltaStreamer objects for every table/topic and does 
incremental sync.
    */
   public void sync() {
+    List<HoodieDeltaStreamer> hdsObjectList = new ArrayList<>();
+
+    // The sync function is not executed when multiple sources update the same 
target.
     for (TableExecutionContext context : tableExecutionContexts) {
       try {
-        new HoodieDeltaStreamer(context.getConfig(), jssc, 
Option.ofNullable(context.getProperties())).sync();
+        HoodieDeltaStreamer hds = new HoodieDeltaStreamer(context.getConfig(), 
jssc, Option.ofNullable(context.getProperties()));
+
+        // Add object of HoodieDeltaStreamer temporarily to hdsObjectList when 
multiple sources update the same target.
+        if 
(!StringUtils.isNullOrEmpty(context.getProperties().getProperty(Constants.SOURCES_TO_BE_BOUND)))
 {
+          hdsObjectList.add(hds);
+          continue;
+        }
+
+        hds.sync();
         successTables.add(Helpers.getTableWithDatabase(context));
       } catch (Exception e) {
-        logger.error("error while running MultiTableDeltaStreamer for table: " 
+ context.getTableName(), e);
+        logger.error("Error while running MultiTableDeltaStreamer for table: " 
+ context.getTableName(), e);
         failedTables.add(Helpers.getTableWithDatabase(context));
       }
     }
 
-    logger.info("Ingestion was successful for topics: " + successTables);
-    if (!failedTables.isEmpty()) {
-      logger.info("Ingestion failed for topics: " + failedTables);
+    // If hdsObjectList is empty, it indicates that all source sync operations 
have been completed. In this case, directly return.
+    if (hdsObjectList.isEmpty()) {
+      logger.info("Ingestion was successful for topics: " + successTables);
+      if (!failedTables.isEmpty()) {
+        logger.info("Ingestion failed for topics: " + failedTables);
+      }
+      return;
     }
+
+    // The sync function is executing here when multiple sources update the 
same target.
+    boolean isContinuousMode = hdsObjectList.get(0).cfg.continuousMode;

Review comment:
       I guess we need to move this to L488, as 
   ```
   boolean isContinuousMode = hdsObjectList.get(i).cfg.continuousMode;
   ```
   Essentially, we can't have continuous mode enabled for any of the tables, right?
   

##########
File path: 
hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieMultiTableDeltaStreamer.java
##########
@@ -370,50 +441,124 @@ public static void main(String[] args) throws 
IOException {
   private static String resetTarget(Config configuration, String database, 
String tableName) {
     String basePathPrefix = configuration.basePathPrefix;
     basePathPrefix = basePathPrefix.charAt(basePathPrefix.length() - 1) == '/' 
? basePathPrefix.substring(0, basePathPrefix.length() - 1) : basePathPrefix;
-    String targetBasePath = basePathPrefix + Constants.FILE_DELIMITER + 
database + Constants.FILE_DELIMITER + tableName;
-    configuration.targetTableName = database + Constants.DELIMITER + tableName;
+    String targetBasePath = basePathPrefix + Constants.PATH_SEPARATOR + 
database + Constants.PATH_SEPARATOR + tableName;
+    configuration.targetTableName = database + Constants.PATH_CUR_DIR + 
tableName;
     return targetBasePath;
   }
 
   /**
    * Creates actual HoodieDeltaStreamer objects for every table/topic and does 
incremental sync.
    */
   public void sync() {
+    List<HoodieDeltaStreamer> hdsObjectList = new ArrayList<>();
+
+    // The sync function is not executed when multiple sources update the same 
target.
     for (TableExecutionContext context : tableExecutionContexts) {
       try {
-        new HoodieDeltaStreamer(context.getConfig(), jssc, 
Option.ofNullable(context.getProperties())).sync();
+        HoodieDeltaStreamer hds = new HoodieDeltaStreamer(context.getConfig(), 
jssc, Option.ofNullable(context.getProperties()));
+
+        // Add object of HoodieDeltaStreamer temporarily to hdsObjectList when 
multiple sources update the same target.
+        if 
(!StringUtils.isNullOrEmpty(context.getProperties().getProperty(Constants.SOURCES_TO_BE_BOUND)))
 {
+          hdsObjectList.add(hds);
+          continue;
+        }
+
+        hds.sync();
         successTables.add(Helpers.getTableWithDatabase(context));
       } catch (Exception e) {
-        logger.error("error while running MultiTableDeltaStreamer for table: " 
+ context.getTableName(), e);
+        logger.error("Error while running MultiTableDeltaStreamer for table: " 
+ context.getTableName(), e);
         failedTables.add(Helpers.getTableWithDatabase(context));
       }
     }
 
-    logger.info("Ingestion was successful for topics: " + successTables);
-    if (!failedTables.isEmpty()) {
-      logger.info("Ingestion failed for topics: " + failedTables);
+    // If hdsObjectList is empty, it indicates that all source sync operations 
have been completed. In this case, directly return.
+    if (hdsObjectList.isEmpty()) {
+      logger.info("Ingestion was successful for topics: " + successTables);
+      if (!failedTables.isEmpty()) {
+        logger.info("Ingestion failed for topics: " + failedTables);
+      }
+      return;
     }
+
+    // The sync function is executing here when multiple sources update the 
same target.
+    boolean isContinuousMode = hdsObjectList.get(0).cfg.continuousMode;
+    do {
+      // Executing sync function by traversing hdsObjectList when multiple 
sources update the same target.
+      for (int i = 0; i < hdsObjectList.size(); i++) {
+        // Threads cannot be started when multiple sources update the same 
target.
+        if (isContinuousMode) {
+          hdsObjectList.get(i).cfg.continuousMode = false;
+        }
+
+        try {
+          hdsObjectList.get(i).sync();
+          
successTables.add(Helpers.getTableWithDatabase(tableExecutionContexts.get(i)));
+        } catch (Exception e) {
+          logger.error("Error while running MultiTableDeltaStreamer for table: 
"
+              + tableExecutionContexts.get(i).getTableName(), e);
+          
failedTables.add(Helpers.getTableWithDatabase(tableExecutionContexts.get(i)));
+          break;
+        }
+      }
+
+      logger.info("Ingestion was successful for topics: " + successTables);
+      if (!failedTables.isEmpty()) {
+        logger.info("Ingestion failed for topics: " + failedTables);
+        break;
+      }
+      successTables.clear();
+    } while (isContinuousMode);
   }
 
   public static class Constants {
+    // When there are multiple sources, you can use this configuration item to 
set an independent checkpoint for the source.
+    public static final String SOURCE_CHECKPOINT = 
"hoodie.deltastreamer.current.source.checkpoint";

Review comment:
       Do you think we can move these configs to one of the DeltaStreamer 
classes? The way I look at this patch, we are adding support for ingesting data 
from multiple sources into a single Hudi table, so these configs would probably fit 
well in DeltaStreamer. 

##########
File path: 
hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieMultiTableDeltaStreamer.java
##########
@@ -370,50 +441,124 @@ public static void main(String[] args) throws 
IOException {
   private static String resetTarget(Config configuration, String database, 
String tableName) {
     String basePathPrefix = configuration.basePathPrefix;
     basePathPrefix = basePathPrefix.charAt(basePathPrefix.length() - 1) == '/' 
? basePathPrefix.substring(0, basePathPrefix.length() - 1) : basePathPrefix;
-    String targetBasePath = basePathPrefix + Constants.FILE_DELIMITER + 
database + Constants.FILE_DELIMITER + tableName;
-    configuration.targetTableName = database + Constants.DELIMITER + tableName;
+    String targetBasePath = basePathPrefix + Constants.PATH_SEPARATOR + 
database + Constants.PATH_SEPARATOR + tableName;
+    configuration.targetTableName = database + Constants.PATH_CUR_DIR + 
tableName;
     return targetBasePath;
   }
 
   /**
    * Creates actual HoodieDeltaStreamer objects for every table/topic and does 
incremental sync.
    */
   public void sync() {
+    List<HoodieDeltaStreamer> hdsObjectList = new ArrayList<>();
+
+    // The sync function is not executed when multiple sources update the same 
target.
     for (TableExecutionContext context : tableExecutionContexts) {
       try {
-        new HoodieDeltaStreamer(context.getConfig(), jssc, 
Option.ofNullable(context.getProperties())).sync();
+        HoodieDeltaStreamer hds = new HoodieDeltaStreamer(context.getConfig(), 
jssc, Option.ofNullable(context.getProperties()));
+
+        // Add object of HoodieDeltaStreamer temporarily to hdsObjectList when 
multiple sources update the same target.
+        if 
(!StringUtils.isNullOrEmpty(context.getProperties().getProperty(Constants.SOURCES_TO_BE_BOUND)))
 {
+          hdsObjectList.add(hds);
+          continue;
+        }
+
+        hds.sync();
         successTables.add(Helpers.getTableWithDatabase(context));
       } catch (Exception e) {
-        logger.error("error while running MultiTableDeltaStreamer for table: " 
+ context.getTableName(), e);
+        logger.error("Error while running MultiTableDeltaStreamer for table: " 
+ context.getTableName(), e);
         failedTables.add(Helpers.getTableWithDatabase(context));
       }
     }
 
-    logger.info("Ingestion was successful for topics: " + successTables);
-    if (!failedTables.isEmpty()) {
-      logger.info("Ingestion failed for topics: " + failedTables);
+    // If hdsObjectList is empty, it indicates that all source sync operations 
have been completed. In this case, directly return.
+    if (hdsObjectList.isEmpty()) {
+      logger.info("Ingestion was successful for topics: " + successTables);
+      if (!failedTables.isEmpty()) {
+        logger.info("Ingestion failed for topics: " + failedTables);
+      }
+      return;
     }
+
+    // The sync function is executing here when multiple sources update the 
same target.
+    boolean isContinuousMode = hdsObjectList.get(0).cfg.continuousMode;
+    do {
+      // Executing sync function by traversing hdsObjectList when multiple 
sources update the same target.
+      for (int i = 0; i < hdsObjectList.size(); i++) {
+        // Threads cannot be started when multiple sources update the same 
target.
+        if (isContinuousMode) {
+          hdsObjectList.get(i).cfg.continuousMode = false;
+        }
+
+        try {
+          hdsObjectList.get(i).sync();
+          
successTables.add(Helpers.getTableWithDatabase(tableExecutionContexts.get(i)));
+        } catch (Exception e) {
+          logger.error("Error while running MultiTableDeltaStreamer for table: 
"
+              + tableExecutionContexts.get(i).getTableName(), e);
+          
failedTables.add(Helpers.getTableWithDatabase(tableExecutionContexts.get(i)));
+          break;
+        }
+      }
+
+      logger.info("Ingestion was successful for topics: " + successTables);
+      if (!failedTables.isEmpty()) {
+        logger.info("Ingestion failed for topics: " + failedTables);
+        break;
+      }
+      successTables.clear();
+    } while (isContinuousMode);
   }
 
   public static class Constants {
+    // When there are multiple sources, you can use this configuration item to 
set an independent checkpoint for the source.
+    public static final String SOURCE_CHECKPOINT = 
"hoodie.deltastreamer.current.source.checkpoint";
+
+    // If there are multiple sources, you can use this configuration item to 
set an alias for the source to distinguish the source.
+    // In addition, the alias is used as a suffix to distinguish the 
CHECKPOINT_KEY and CHECKPOINT_RESET_KEY of each source.
+    public static final String SOURCE_NAME = 
"hoodie.deltastreamer.current.source.name";
+
     public static final String KAFKA_TOPIC_PROP = 
"hoodie.deltastreamer.source.kafka.topic";
+
     private static final String SOURCE_SCHEMA_REGISTRY_URL_PROP = 
"hoodie.deltastreamer.schemaprovider.registry.url";
+
     private static final String TARGET_SCHEMA_REGISTRY_URL_PROP = 
"hoodie.deltastreamer.schemaprovider.registry.targetUrl";
+
     public static final String HIVE_SYNC_TABLE_PROP = 
"hoodie.datasource.hive_sync.table";
+
     private static final String SCHEMA_REGISTRY_BASE_URL_PROP = 
"hoodie.deltastreamer.schemaprovider.registry.baseUrl";
+
     private static final String SCHEMA_REGISTRY_URL_SUFFIX_PROP = 
"hoodie.deltastreamer.schemaprovider.registry.urlSuffix";
+
     private static final String SCHEMA_REGISTRY_SOURCE_URL_SUFFIX = 
"hoodie.deltastreamer.schemaprovider.registry.sourceUrlSuffix";
+
     private static final String SCHEMA_REGISTRY_TARGET_URL_SUFFIX = 
"hoodie.deltastreamer.schemaprovider.registry.targetUrlSuffix";
+
     private static final String TABLES_TO_BE_INGESTED_PROP = 
"hoodie.deltastreamer.ingestion.tablesToBeIngested";
+
+    // This configuration item specifies the database name and table name of 
the source. The format is "database.table".
+    // It is recommended that table name be the same as the alias of the 
source. If there are multiple sources, separate them with commas.
+    public static final String SOURCES_TO_BE_BOUND = 
"hoodie.deltastreamer.source.sourcesToBeBound";
+
+    private static final String SOURCE_PREFIX = "hoodie.deltastreamer.source.";
+
     private static final String INGESTION_PREFIX = 
"hoodie.deltastreamer.ingestion.";
+
     private static final String INGESTION_CONFIG_SUFFIX = ".configFile";
+
     private static final String DEFAULT_CONFIG_FILE_NAME_SUFFIX = 
"_config.properties";
+
     private static final String TARGET_BASE_PATH_PROP = 
"hoodie.deltastreamer.ingestion.targetBasePath";
+
     private static final String LOCAL_SPARK_MASTER = "local[2]";
-    private static final String FILE_DELIMITER = "/";
-    private static final String DELIMITER = ".";
+
+    public static final String PATH_SEPARATOR = "/";
+
+    public static final String PATH_CUR_DIR = ".";

Review comment:
       I would prefer to keep this constant named "DELIMITER". 




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to