[ https://issues.apache.org/jira/browse/GOBBLIN-865?focusedWorklogId=353634&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-353634 ]
ASF GitHub Bot logged work on GOBBLIN-865:
------------------------------------------

                Author: ASF GitHub Bot
            Created on: 04/Dec/19 18:35
            Start Date: 04/Dec/19 18:35
    Worklog Time Spent: 10m
      Work Description: htran1 commented on pull request #2722: GOBBLIN-865: Add feature that enables PK-chunking in partition
URL: https://github.com/apache/incubator-gobblin/pull/2722#discussion_r353909717


 ##########
 File path: gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/SalesforceSource.java
 ##########
 @@ -146,12 +153,120 @@ protected void addLineageSourceInfo(SourceState sourceState, SourceEntity entity

   @Override
   protected List<WorkUnit> generateWorkUnits(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
+    log.info("====sfdc connector with pkchk===="); // TODO: remove this after merge back to OS.
+    List<WorkUnit> workUnits = null;
+    String partitionType = state.getProp(SALESFORCE_PARTITION_TYPE, "");
+    if (partitionType.equals("PK_CHUNKING")) {
+      // pk-chunking only supports a start time via source.querybased.start.value, and does not support an end time.
+      // It always ingests data later than or equal to source.querybased.start.value.
+      // Pk-chunking based work units should only be generated for snapshot/full ingestion.
+      workUnits = generateWorkUnitsPkChunking(sourceEntity, state, previousWatermark);
+    } else {
+      workUnits = generateWorkUnitsStrategy(sourceEntity, state, previousWatermark);
+    }
+    log.info("====Generated {} workUnit(s)====", workUnits.size());
+    return workUnits;
+  }
+
+  /**
+   * Generate work units for pk-chunking.
+   */
+  private List<WorkUnit> generateWorkUnitsPkChunking(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
+    JobIdAndBatchIdResultIdList jobIdAndBatchIdResultIdList = executeQueryWithPkChunking(state, previousWatermark);
+    return createWorkUnits(sourceEntity, state, jobIdAndBatchIdResultIdList);
+  }
+
+  private JobIdAndBatchIdResultIdList executeQueryWithPkChunking(
+      SourceState sourceState,
+      long previousWatermark
+  ) throws RuntimeException {
+    State state = new State(sourceState);
+    WorkUnit workUnit = WorkUnit.createEmpty();
+    WorkUnitState workUnitState = new WorkUnitState(workUnit, state);
+    workUnitState.setId("Execute pk-chunking");
+    try {
+      SalesforceExtractor salesforceExtractor = (SalesforceExtractor) this.getExtractor(workUnitState);
+      Partitioner partitioner = new Partitioner(sourceState);
+      if (isEarlyStopEnabled(state) && partitioner.isFullDump()) {
+        throw new UnsupportedOperationException("Early stop mode cannot work with full dump mode.");
+      }
+      Partition partition = partitioner.getGlobalPartition(previousWatermark);
+      String condition = "";
+      Date startDate = Utils.toDate(partition.getLowWatermark(), Partitioner.WATERMARKTIMEFORMAT);
+      String field = sourceState.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY);
+      // pk-chunking only supports a start time via source.querybased.start.value, and does not support an end time.
+      // It always ingests data later than or equal to source.querybased.start.value.
+      // Pk-chunking based work units should only be generated for snapshot/full ingestion.
+      if (startDate != null && field != null) {
+        String lowWatermarkDate = Utils.dateToString(startDate, SalesforceExtractor.SALESFORCE_TIMESTAMP_FORMAT);
+        condition = field + " >= " + lowWatermarkDate;
+      }
+      Predicate predicate = new Predicate(null, 0, condition, "", null);
+      List<Predicate> predicateList = Arrays.asList(predicate);
+      String entity = sourceState.getProp(ConfigurationKeys.SOURCE_ENTITY);
+
+      if (state.contains(PK_CHUNKING_TEST_JOB_ID)) {
+        String jobId = state.getProp(PK_CHUNKING_TEST_JOB_ID, "");
+        log.info("---Skip query, fetching result files directly for [jobId={}]", jobId);
+        String batchIdListStr = state.getProp(PK_CHUNKING_TEST_BATCH_ID_LIST);
+        return salesforceExtractor.getQueryResultIdsPkChunkingFetchOnly(jobId, batchIdListStr);
+      } else {
+        log.info("---Pk Chunking query submit.");
+        return salesforceExtractor.getQueryResultIdsPkChunking(entity, predicateList);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Create work units from a bulk job id.
+   * The work units won't contain a query in this case. Instead they contain the bulk job id and a list of `batchId:resultId` pairs,
+   * so in the extractor the only remaining work is to fetch the result-set files.
+   */
+  private List<WorkUnit> createWorkUnits(
+      SourceEntity sourceEntity,
+      SourceState state,
+      JobIdAndBatchIdResultIdList jobIdAndBatchIdResultIdList
+  ) {
+    String nameSpaceName = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
+    Extract.TableType tableType = Extract.TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());
+    String outputTableName = sourceEntity.getDestTableName();
+    Extract extract = createExtract(tableType, nameSpaceName, outputTableName);
+
+    List<WorkUnit> workUnits = Lists.newArrayList();
+    int partitionNumber = state.getPropAsInt(SOURCE_MAX_NUMBER_OF_PARTITIONS, 1);
+    List<BatchIdAndResultId> batchResultIds = jobIdAndBatchIdResultIdList.getBatchIdAndResultIdList();
+    int total = batchResultIds.size();
+    // size of every partition should be math.ceil(total/partitionNumber); use the simpler integer form: (total+partitionNumber-1)/partitionNumber
+    int sizeOfPartition = (total + partitionNumber - 1) / partitionNumber;
+    List<List<BatchIdAndResultId>> partitionedResultIds = Lists.partition(batchResultIds, sizeOfPartition);
+    log.info("----partition strategy: max-parti={}, size={}, actual-parti={}, total={}", partitionNumber, sizeOfPartition, partitionedResultIds.size(), total);
+    for (List<BatchIdAndResultId> resultIds : partitionedResultIds) {

Review comment:
   Can you add new lines between the blocks?

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 353634)
            Time Spent: 10h  (was: 9h 50m)
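
For readers of the diff above, here is a minimal, self-contained sketch of the partition-size arithmetic used in createWorkUnits. It is illustrative only: plain strings stand in for BatchIdAndResultId, and the counts are invented.

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class PkChunkingPartitionSketch {
  public static void main(String[] args) {
    // Hypothetical result set: 10 "batchId:resultId" pairs returned by a PK-chunked bulk job.
    List<String> batchResultIds = Arrays.asList(
        "b1:r1", "b2:r2", "b3:r3", "b4:r4", "b5:r5",
        "b6:r6", "b7:r7", "b8:r8", "b9:r9", "b10:r10");
    int partitionNumber = 3;            // cap on the number of work units (SOURCE_MAX_NUMBER_OF_PARTITIONS)
    int total = batchResultIds.size();  // 10
    // Integer form of ceil(total / partitionNumber): (10 + 3 - 1) / 3 = 4
    int sizeOfPartition = (total + partitionNumber - 1) / partitionNumber;
    // Guava splits the list into groups of 4, 4 and 2 -> 3 work units, each fetching its own result files.
    List<List<String>> partitioned = Lists.partition(batchResultIds, sizeOfPartition);
    partitioned.forEach(group -> System.out.println(group.size() + " -> " + group));
  }
}

Because sizeOfPartition is rounded up, the actual number of partitions can be smaller than the configured maximum, which is what the "actual-parti" value in the log line of the diff reports.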
> Add feature that enables PK-chunking in partition
> --------------------------------------------------
>
>                 Key: GOBBLIN-865
>                 URL: https://issues.apache.org/jira/browse/GOBBLIN-865
>             Project: Apache Gobblin
>          Issue Type: Task
>            Reporter: Alex Li
>            Priority: Major
>              Labels: salesforce
>          Time Spent: 10h
>  Remaining Estimate: 0h
>
> In the SFDC (Salesforce) connector we have partitioning mechanisms to split a
> giant query into multiple sub-queries. There are 3 mechanisms:
> * simple partition (equally split by time)
> * dynamic pre-partition (generate a histogram and split by row numbers)
> * user specified partition (set up time ranges in the job file)
>
> However, tables like Task and Contract fail from time to time to fetch the
> full data. We may want to utilize PK-chunking to partition the query.
>
> The pk-chunking doc from SFDC -
> [https://developer.salesforce.com/docs/atlas.en-us.api_asynch.meta/api_asynch/async_api_headers_enable_pk_chunking.htm]



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
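
As a companion to the PK-chunking doc linked in the issue description above: PK chunking is requested with a header when the Bulk API job is created. The following is a rough sketch using plain HttpURLConnection; the instance URL, API version, session id and object name are placeholders (not values from this issue), and error handling is omitted.

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class EnablePkChunkingSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder instance URL and API version; see the linked SFDC doc for details.
    URL jobUrl = new URL("https://yourInstance.salesforce.com/services/async/47.0/job");
    HttpURLConnection conn = (HttpURLConnection) jobUrl.openConnection();
    conn.setRequestMethod("POST");
    conn.setDoOutput(true);
    conn.setRequestProperty("X-SFDC-Session", "<sessionId>");          // placeholder session id
    conn.setRequestProperty("Content-Type", "application/xml");
    // The header that turns on PK chunking for this bulk job; chunkSize is optional.
    conn.setRequestProperty("Sforce-Enable-PKChunking", "chunkSize=100000");
    String jobXml =
        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
        + "<jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">"
        + "<operation>query</operation><object>Contract</object>"
        + "<contentType>CSV</contentType></jobInfo>";
    try (OutputStream out = conn.getOutputStream()) {
      out.write(jobXml.getBytes(StandardCharsets.UTF_8));
    }
    System.out.println("Create job HTTP status: " + conn.getResponseCode());
  }
}

With the header set, Salesforce splits the extraction query into batches by primary key, and each batch produces result files that can be downloaded independently, which is what the work units created in the PR above fetch.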