[ https://issues.apache.org/jira/browse/GOBBLIN-865?focusedWorklogId=304592&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-304592 ]
ASF GitHub Bot logged work on GOBBLIN-865:
------------------------------------------

                Author: ASF GitHub Bot
            Created on: 30/Aug/19 20:52
            Start Date: 30/Aug/19 20:52
    Worklog Time Spent: 10m
      Work Description: zxcware commented on pull request #2722: GOBBLIN-865: Add feature that enables PK-chunking in partition
URL: https://github.com/apache/incubator-gobblin/pull/2722#discussion_r319671525

##########
File path: gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/SalesforceSource.java
##########

@@ -146,6 +156,95 @@ protected void addLineageSourceInfo(SourceState sourceState, SourceEntity entity
   @Override
   protected List<WorkUnit> generateWorkUnits(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
+    String partitionType = state.getProp(PARTITION_TYPE, "PK_CHUNKING");
+    if (partitionType.equals("PK_CHUNKING")) {
+      return generateWorkUnitsPkChunking(sourceEntity, state, previousWatermark);
+    } else {
+      return generateWorkUnitsStrategy(sourceEntity, state, previousWatermark);
+    }
+  }
+
+  /**
+   * Generate work units with noQuery=true
+   */
+  private List<WorkUnit> generateWorkUnitsPkChunking(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
+    List<SalesforceExtractor.BatchIdAndResultId> batchIdAndResultIds = executeQueryWithPkChunking(state, previousWatermark);
+    List<WorkUnit> ret = createWorkUnits(sourceEntity, state, batchIdAndResultIds);
+    return ret;
+  }
+
+  private List<SalesforceExtractor.BatchIdAndResultId> executeQueryWithPkChunking(
+      SourceState sourceState,
+      long previousWatermark
+  ) throws RuntimeException {
+    Properties commonProperties = sourceState.getCommonProperties();
+    Properties specProperties = sourceState.getSpecProperties();
+    State state = new State();
+    state.setProps(commonProperties, specProperties);
+    WorkUnit workUnit = WorkUnit.createEmpty();
+    try {
+      WorkUnitState workUnitState = new WorkUnitState(workUnit, state);
+      workUnitState.setId("test" + new Random().nextInt());
+      workUnitState.setProp(ENABLE_PK_CHUNKING_KEY, true); // enable PK chunking in the extractor
+      int chunkSize = workUnitState.getPropAsInt(PARTITION_PK_CHUNKING_SIZE, DEFAULT_PK_CHUNKING_SIZE);
+      workUnitState.setProp(PK_CHUNKING_SIZE_KEY, chunkSize); // set the extractor's PK chunking size
+      workUnitState.setProp(PK_CHUNKING_SKIP_COUNT_CHECK, true); // skip the count check, since no count is available with PK chunking
+      SalesforceExtractor salesforceExtractor = (SalesforceExtractor) this.getExtractor(workUnitState);
+      String entity = sourceState.getProp(ConfigurationKeys.SOURCE_ENTITY);
+      Partitioner partitioner = new Partitioner(sourceState);
+      if (isEarlyStopEnabled(state) && partitioner.isFullDump()) {
+        throw new UnsupportedOperationException("Early stop mode cannot work with full dump mode.");
+      }
+      Partition partition = partitioner.getGlobalPartition(previousWatermark);
+      String condition = "";
+      Date startDate = Utils.toDate(partition.getLowWatermark(), Partitioner.WATERMARKTIMEFORMAT);
+      String field = sourceState.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY);
+      if (startDate != null && field != null) {
+        String lowWatermarkDate = Utils.dateToString(startDate, SalesforceExtractor.SALESFORCE_TIMESTAMP_FORMAT);
+        condition = field + " >= " + lowWatermarkDate;
+      }
+      Predicate predicate = new Predicate(null, 0, condition, "", null);
+      List<Predicate> predicateList = Arrays.asList(predicate);
+      List<SalesforceExtractor.BatchIdAndResultId> ids = salesforceExtractor.getQueryResultIds(entity, predicateList);
+      return ids;
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private List<WorkUnit> createWorkUnits(
+      SourceEntity sourceEntity,
+      SourceState state,
+      List<SalesforceExtractor.BatchIdAndResultId> batchResultIds
+  ) {
+    String nameSpaceName = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
+    Extract.TableType tableType = Extract.TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());
+    String outputTableName = sourceEntity.getDestTableName();
+    Extract extract = createExtract(tableType, nameSpaceName, outputTableName);
+
+    List<WorkUnit> workUnits = Lists.newArrayList();
+    int partitionNumber = state.getPropAsInt(SOURCE_SOURCE_MAX_NUMBER_OF_PARTITIONS, 1);
+    int maxPartition = (batchResultIds.size() + partitionNumber - 1) / partitionNumber; // ceiling division: batches per partition
+    List<List<SalesforceExtractor.BatchIdAndResultId>> partitionedResultIds = Lists.partition(batchResultIds, maxPartition);
+    String bulkJobId = batchResultIds.get(0).getBulkJobId();

Review comment:
We could define an object that bundles the jobId with the BatchIdAndResultId entries, given that the jobId is the same for all batches of a job; that would help reduce duplicated data. For example:
```
SalesforceBulkJob {
  String jobId;
  List<BatchIdAndResultId> batchIdAndResultIds;
}
```
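A fuller, compilable sketch of that idea (illustrative only; `SalesforceBulkJob` and its accessors are hypothetical and not part of this PR):

```java
import java.util.List;

import org.apache.gobblin.salesforce.SalesforceExtractor;

// Hypothetical wrapper: the bulk job id is stored once for the whole job
// instead of being repeated inside every BatchIdAndResultId.
public class SalesforceBulkJob {
  private final String jobId;
  private final List<SalesforceExtractor.BatchIdAndResultId> batchIdAndResultIds;

  public SalesforceBulkJob(String jobId,
      List<SalesforceExtractor.BatchIdAndResultId> batchIdAndResultIds) {
    this.jobId = jobId;
    this.batchIdAndResultIds = batchIdAndResultIds;
  }

  public String getJobId() {
    return jobId;
  }

  public List<SalesforceExtractor.BatchIdAndResultId> getBatchIdAndResultIds() {
    return batchIdAndResultIds;
  }
}
```

With that shape, `getQueryResultIds` could return a single `SalesforceBulkJob`, and `createWorkUnits` would read the job id from the wrapper rather than from `batchResultIds.get(0)`.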
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id: (was: 304592)
    Time Spent: 1h  (was: 50m)

> Add feature that enables PK-chunking in partition
> --------------------------------------------------
>
>                 Key: GOBBLIN-865
>                 URL: https://issues.apache.org/jira/browse/GOBBLIN-865
>             Project: Apache Gobblin
>          Issue Type: Task
>            Reporter: Alex Li
>            Priority: Major
>              Labels: salesforce
>          Time Spent: 1h
>  Remaining Estimate: 0h
>
> In the SFDC (Salesforce) connector, we have partitioning mechanisms to split a giant query into multiple sub-queries. There are three mechanisms:
> * simple partition (split equally by time)
> * dynamic pre-partition (generate a histogram and split by row counts)
> * user-specified partition (set a time range in the job file)
> However, tables such as Task and Contract fail from time to time to fetch their full data.
> We may want to utilize PK-chunking to partition the query.
>
> The PK-chunking doc from SFDC:
> https://developer.salesforce.com/docs/atlas.en-us.api_asynch.meta/api_asynch/async_api_headers_enable_pk_chunking.htm
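For context on what enabling PK chunking looks like at the Salesforce Bulk API level, here is a minimal sketch based on the SFDC doc linked above. It is not part of the connector; the instance URL, API version, and session id are placeholders, while the `Sforce-Enable-PKChunking` header and the jobInfo XML follow the documented Bulk API format:

```java
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class PkChunkingBulkJobSketch {
  public static void main(String[] args) throws Exception {
    // Placeholders: use your own instance URL, API version, and a real session id.
    URL jobEndpoint = new URL("https://yourInstance.salesforce.com/services/async/47.0/job");
    HttpURLConnection conn = (HttpURLConnection) jobEndpoint.openConnection();
    conn.setRequestMethod("POST");
    conn.setRequestProperty("X-SFDC-Session", "<sessionId>");
    conn.setRequestProperty("Content-Type", "application/xml");
    // This header is what actually turns on PK chunking for the bulk job;
    // chunkSize is the number of records per chunk (default 100000, max 250000).
    conn.setRequestProperty("Sforce-Enable-PKChunking", "chunkSize=100000");
    conn.setDoOutput(true);

    String jobXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
        + "<jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">"
        + "<operation>query</operation>"
        + "<object>Contract</object>"
        + "<contentType>CSV</contentType>"
        + "</jobInfo>";
    try (OutputStream out = conn.getOutputStream()) {
      out.write(jobXml.getBytes(StandardCharsets.UTF_8));
    }
    // 201 means the job was created; Salesforce then splits the query into
    // one batch per PK chunk, whose batch/result ids the extractor collects.
    System.out.println("HTTP " + conn.getResponseCode());
  }
}
```

In the PR above, the `ENABLE_PK_CHUNKING_KEY` and `PK_CHUNKING_SIZE_KEY` properties set on the work unit state appear to be what the extractor translates into this behavior when it creates its bulk job.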