arekusuri commented on a change in pull request #2722: GOBBLIN-865: Add feature that enables PK-chunking in partition
URL: https://github.com/apache/incubator-gobblin/pull/2722#discussion_r321539964
 
 

 ##########
 File path: gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/SalesforceSource.java
 ##########
 @@ -146,6 +156,95 @@ protected void addLineageSourceInfo(SourceState sourceState, SourceEntity entity
 
   @Override
   protected List<WorkUnit> generateWorkUnits(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
+    String partitionType = state.getProp(PARTITION_TYPE, "PK_CHUNKING");
+    if (partitionType.equals("PK_CHUNKING")) {
+      return generateWorkUnitsPkChunking(sourceEntity, state, previousWatermark);
+    } else {
+      return generateWorkUnitsStrategy(sourceEntity, state, previousWatermark);
+    }
+  }
+
+  /**
+   * generate workUnit with noQuery=true
+   */
+  private List<WorkUnit> generateWorkUnitsPkChunking(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
+      List<SalesforceExtractor.BatchIdAndResultId> batchIdAndResultIds = executeQueryWithPkChunking(state, previousWatermark);
+      List<WorkUnit> ret = createWorkUnits(sourceEntity, state, batchIdAndResultIds);
+      return ret;
+  }
+
+  private List<SalesforceExtractor.BatchIdAndResultId> executeQueryWithPkChunking(
+      SourceState sourceState,
+      long previousWatermark
+  ) throws RuntimeException {
+    Properties commonProperties = sourceState.getCommonProperties();
+    Properties specProperties = sourceState.getSpecProperties();
+    State state = new State();
+    state.setProps(commonProperties, specProperties);
+    WorkUnit workUnit = WorkUnit.createEmpty();
+    try {
+      WorkUnitState workUnitState = new WorkUnitState(workUnit, state);
+      workUnitState.setId("test" + new Random().nextInt());
+      workUnitState.setProp(ENABLE_PK_CHUNKING_KEY, true); // set extractor enable pk chunking
+      int chunkSize = workUnitState.getPropAsInt(PARTITION_PK_CHUNKING_SIZE, DEFAULT_PK_CHUNKING_SIZE);
+      workUnitState.setProp(PK_CHUNKING_SIZE_KEY, chunkSize); // set extractor pk chunking size
+      workUnitState.setProp(PK_CHUNKING_SKIP_COUNT_CHECK, true); // don't use count check for we couldn't get count
+      SalesforceExtractor salesforceExtractor = (SalesforceExtractor) this.getExtractor(workUnitState);
+      String entity = sourceState.getProp(ConfigurationKeys.SOURCE_ENTITY);
+      Partitioner partitioner = new Partitioner(sourceState);
+      if (isEarlyStopEnabled(state) && partitioner.isFullDump()) {
+        throw new UnsupportedOperationException("Early stop mode cannot work with full dump mode.");
+      }
+      Partition partition = partitioner.getGlobalPartition(previousWatermark);
+      String condition = "";
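
For orientation, a hedged, standalone sketch (not code from this PR) of how an extractor could read back the PK-chunking flags that the diff above sets on the throwaway WorkUnitState. The property-key strings and the default chunk size below are placeholders, not the connector's actual constants:

    import org.apache.gobblin.configuration.WorkUnitState;

    public class PkChunkingFlagsSketch {
      // Placeholder keys and default; the real constants live in the Salesforce connector.
      static final String ENABLE_PK_CHUNKING_KEY = "salesforce.pk.chunking.enabled";
      static final String PK_CHUNKING_SIZE_KEY = "salesforce.pk.chunking.size";
      static final String PK_CHUNKING_SKIP_COUNT_CHECK = "salesforce.pk.chunking.skip.count.check";
      static final int DEFAULT_PK_CHUNKING_SIZE = 250_000; // assumed default, not from the PR

      static void configureBulkJob(WorkUnitState workUnitState) {
        // Read back the flags that the source side set on the WorkUnitState.
        boolean pkChunkingEnabled = workUnitState.getPropAsBoolean(ENABLE_PK_CHUNKING_KEY, false);
        int chunkSize = workUnitState.getPropAsInt(PK_CHUNKING_SIZE_KEY, DEFAULT_PK_CHUNKING_SIZE);
        boolean skipCountCheck = workUnitState.getPropAsBoolean(PK_CHUNKING_SKIP_COUNT_CHECK, false);

        if (pkChunkingEnabled) {
          // A Bulk API job would pass the Sforce-Enable-PKChunking header with chunkSize
          // here, and skip the row-count sanity check when skipCountCheck is true,
          // since PK chunking provides no total record count up front.
        }
      }
    }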
 
 Review comment:
   How about we leave this refactoring for later?
   It doesn't look like I can use the function directly. In the PK-chunking case, we only have a start time; we don't have an end time. The end time is always the current time.
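
For illustration, a minimal sketch of that point, assuming an epoch-millis watermark and hypothetical names (PkChunkingConditionSketch, buildCondition): the condition is bounded only from below, and the upper bound is implicitly the current time.

    import java.time.Instant;
    import java.time.ZoneOffset;
    import java.time.format.DateTimeFormatter;

    public class PkChunkingConditionSketch {
      // SOQL datetime literals are unquoted UTC timestamps.
      private static final DateTimeFormatter SOQL_TS =
          DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").withZone(ZoneOffset.UTC);

      // Builds a condition bounded only from below; with no upper bound,
      // the extraction implicitly runs up to "now".
      static String buildCondition(String watermarkColumn, long previousWatermarkMillis) {
        if (previousWatermarkMillis <= 0) {
          return ""; // full dump: no condition at all
        }
        String start = SOQL_TS.format(Instant.ofEpochMilli(previousWatermarkMillis));
        return watermarkColumn + " >= " + start;
      }

      public static void main(String[] args) {
        // Prints: SystemModstamp >= 2019-09-01T00:00:00.000Z
        System.out.println(buildCondition("SystemModstamp", 1567296000000L));
      }
    }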
   

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
