arekusuri commented on a change in pull request #2722: GOBBLIN-865: Add feature that enables PK-chunking in partition
URL: https://github.com/apache/incubator-gobblin/pull/2722#discussion_r320967132
 
 

 ##########
 File path: gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/SalesforceSource.java
 ##########
 @@ -146,6 +156,95 @@ protected void addLineageSourceInfo(SourceState sourceState, SourceEntity entity
 
   @Override
  protected List<WorkUnit> generateWorkUnits(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
+    String partitionType = state.getProp(PARTITION_TYPE, "PK_CHUNKING");
+    if (partitionType.equals("PK_CHUNKING")) {
+      return generateWorkUnitsPkChunking(sourceEntity, state, previousWatermark);
+    } else {
+      return generateWorkUnitsStrategy(sourceEntity, state, previousWatermark);
+    }
+  }
+
+  /**
+   * Generate work units in PK-chunking mode (noQuery=true).
+   */
+  private List<WorkUnit> generateWorkUnitsPkChunking(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
+    List<SalesforceExtractor.BatchIdAndResultId> batchIdAndResultIds = executeQueryWithPkChunking(state, previousWatermark);
+    return createWorkUnits(sourceEntity, state, batchIdAndResultIds);
+  }
+
+  private List<SalesforceExtractor.BatchIdAndResultId> executeQueryWithPkChunking(
+      SourceState sourceState,
+      long previousWatermark
+  ) {
+    Properties commonProperties = sourceState.getCommonProperties();
+    Properties specProperties = sourceState.getSpecProperties();
+    State state = new State();
+    state.setProps(commonProperties, specProperties);
+    WorkUnit workUnit = WorkUnit.createEmpty();
+    try {
+      WorkUnitState workUnitState = new WorkUnitState(workUnit, state);
+      workUnitState.setId("test" + new Random().nextInt());
+      workUnitState.setProp(ENABLE_PK_CHUNKING_KEY, true); // tell the extractor to enable PK chunking
+      int chunkSize = workUnitState.getPropAsInt(PARTITION_PK_CHUNKING_SIZE, DEFAULT_PK_CHUNKING_SIZE);
+      workUnitState.setProp(PK_CHUNKING_SIZE_KEY, chunkSize); // set the extractor's PK chunking size
+      workUnitState.setProp(PK_CHUNKING_SKIP_COUNT_CHECK, true); // skip the count check, since the record count is unavailable in PK-chunking mode
+      SalesforceExtractor salesforceExtractor = (SalesforceExtractor) this.getExtractor(workUnitState);
+      String entity = sourceState.getProp(ConfigurationKeys.SOURCE_ENTITY);
+      Partitioner partitioner = new Partitioner(sourceState);
+      if (isEarlyStopEnabled(state) && partitioner.isFullDump()) {
+        throw new UnsupportedOperationException("Early stop mode cannot work with full dump mode.");
+      }
+      Partition partition = partitioner.getGlobalPartition(previousWatermark);
+      String condition = "";
+      Date startDate = Utils.toDate(partition.getLowWatermark(), Partitioner.WATERMARKTIMEFORMAT);
+      String field = sourceState.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY);
+      if (startDate != null && field != null) {
+        String lowWatermarkDate = Utils.dateToString(startDate, SalesforceExtractor.SALESFORCE_TIMESTAMP_FORMAT);
+        condition = field + " >= " + lowWatermarkDate;
+      }
+      Predicate predicate = new Predicate(null, 0, condition, "", null);
+      List<Predicate> predicateList = Arrays.asList(predicate);
+      return salesforceExtractor.getQueryResultIds(entity, predicateList);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private List<WorkUnit> createWorkUnits(
+      SourceEntity sourceEntity,
+      SourceState state,
+      List<SalesforceExtractor.BatchIdAndResultId> batchResultIds
+  ) {
+    String nameSpaceName = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
+    Extract.TableType tableType = Extract.TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());
+    String outputTableName = sourceEntity.getDestTableName();
+    Extract extract = createExtract(tableType, nameSpaceName, outputTableName);
+
+    List<WorkUnit> workUnits = Lists.newArrayList();
+    int partitionNumber = state.getPropAsInt(SOURCE_SOURCE_MAX_NUMBER_OF_PARTITIONS, 1);
+    // ceiling division: each partition holds at most this many result ids, yielding at most partitionNumber partitions
+    int maxPartition = (batchResultIds.size() + partitionNumber - 1) / partitionNumber;
+    List<List<SalesforceExtractor.BatchIdAndResultId>> partitionedResultIds = Lists.partition(batchResultIds, maxPartition);
+    String bulkJobId = batchResultIds.get(0).getBulkJobId();
 
 Review comment:
   Made it a dedicated class:
   ```
   class SalesforceBulkJob {
     String jobId;
     List<BatchIdAndResultId> batchIdAndResultIdList;
   }
   ```
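
   For illustration, here is a minimal sketch of how `createWorkUnits` could consume this wrapper instead of peeking at the first element of the list (constructor and accessor names below are hypothetical, not part of the PR):
   ```java
   // Hypothetical wrapper pairing a bulk job id with its batch/result ids.
   // Accessor names are illustrative only.
   class SalesforceBulkJob {
     private final String jobId;
     private final List<SalesforceExtractor.BatchIdAndResultId> batchIdAndResultIdList;

     SalesforceBulkJob(String jobId, List<SalesforceExtractor.BatchIdAndResultId> batchIdAndResultIdList) {
       this.jobId = jobId;
       this.batchIdAndResultIdList = batchIdAndResultIdList;
     }

     String getJobId() { return jobId; }

     List<SalesforceExtractor.BatchIdAndResultId> getBatchIdAndResultIdList() { return batchIdAndResultIdList; }
   }

   // createWorkUnits would then take the wrapper and read the job id off it:
   //   String bulkJobId = bulkJob.getJobId();
   //   List<List<SalesforceExtractor.BatchIdAndResultId>> partitionedResultIds =
   //       Lists.partition(bulkJob.getBatchIdAndResultIdList(), maxPartition);
   ```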

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
