ajantha-bhat commented on a change in pull request #3787:
URL: https://github.com/apache/carbondata/pull/3787#discussion_r486218311
##########
File path: integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/rdd/SecondaryIndexCreator.scala
##########
@@ -152,68 +158,181 @@ object SecondaryIndexCreator {
LOGGER.info("spark.dynamicAllocation.maxExecutors property is set to
=" + execInstance)
}
}
- var futureObjectList = List[java.util.concurrent.Future[Array[(String,
Boolean)]]]()
- for (eachSegment <- validSegmentList) {
- val segId = eachSegment
- futureObjectList :+= executorService.submit(new
Callable[Array[(String, Boolean)]] {
- @throws(classOf[Exception])
- override def call(): Array[(String, Boolean)] = {
-
ThreadLocalSessionInfo.getOrCreateCarbonSessionInfo().getNonSerializableExtraInfo
- .put("carbonConf",
SparkSQLUtil.sessionState(sc.sparkSession).newHadoopConf())
- var eachSegmentSecondaryIndexCreationStatus: Array[(String,
Boolean)] = Array.empty
- CarbonLoaderUtil.checkAndCreateCarbonDataLocation(segId,
indexCarbonTable)
- val carbonLoadModel = getCopyObject(secondaryIndexModel)
- carbonLoadModel
-
.setFactTimeStamp(secondaryIndexModel.segmentIdToLoadStartTimeMapping(eachSegment))
-
carbonLoadModel.setTablePath(secondaryIndexModel.carbonTable.getTablePath)
- val secondaryIndexCreationStatus = new
CarbonSecondaryIndexRDD(sc.sparkSession,
- new SecondaryIndexCreationResultImpl,
- carbonLoadModel,
- secondaryIndexModel.secondaryIndex,
- segId, execInstance, indexCarbonTable, forceAccessSegment,
isCompactionCall).collect()
+ var successSISegments: List[String] = List()
+ var failedSISegments: List[String] = List()
+ val sort_scope =
indexCarbonTable.getTableInfo.getFactTable.getTableProperties
+ .get("sort_scope")
+ if (sort_scope != null && sort_scope.equalsIgnoreCase("global_sort")) {
+ val mainTable =
secondaryIndexModel.carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
+ var futureObjectList = List[java.util.concurrent.Future[Array[(String,
+ (LoadMetadataDetails, ExecutionErrors))]]]()
+ for (eachSegment <- validSegmentList) {
+ futureObjectList :+= executorService
+ .submit(new Callable[Array[(String, (LoadMetadataDetails,
ExecutionErrors))]] {
+ @throws(classOf[Exception])
+ override def call(): Array[(String, (LoadMetadataDetails,
ExecutionErrors))] = {
+ val carbonLoadModel = getCopyObject(secondaryIndexModel)
+ // loading, we need to query main table add position reference
+ val proj = indexCarbonTable.getCreateOrderColumn
+ .asScala
+ .map(_.getColName)
+ .filterNot(_.equals("positionReference")).toSet
+ val explodeColumn = mainTable.getCreateOrderColumn.asScala
+ .filter(x => x.getDataType.isComplexType &&
+ proj.contains(x.getColName))
+ var dataFrame = dataFrameOfSegments(sc.sparkSession,
+ mainTable,
+ proj.mkString(","),
+ Array(eachSegment))
+ // flatten the complex SI
+ if (explodeColumn.nonEmpty) {
+ val columns = dataFrame.schema.map { x =>
+ if (x.name.equals(explodeColumn.head.getColName)) {
+ functions.explode_outer(functions.col(x.name))
+ } else {
+ functions.col(x.name)
+ }
+ }
+ dataFrame = dataFrame.select(columns: _*)
+ }
+ val dataLoadSchema = new CarbonDataLoadSchema(indexCarbonTable)
+ carbonLoadModel.setCarbonDataLoadSchema(dataLoadSchema)
+ carbonLoadModel.setTableName(indexCarbonTable.getTableName)
+
carbonLoadModel.setDatabaseName(indexCarbonTable.getDatabaseName)
+ carbonLoadModel.setTablePath(indexCarbonTable.getTablePath)
+ carbonLoadModel.setFactTimeStamp(secondaryIndexModel
+ .segmentIdToLoadStartTimeMapping(eachSegment))
+ carbonLoadModel.setSegmentId(eachSegment)
+ var result: Array[(String, (LoadMetadataDetails,
ExecutionErrors))] = null
+ try {
+ val configuration = FileFactory.getConfiguration
+
configuration.set(CarbonTableInputFormat.INPUT_SEGMENT_NUMBERS, eachSegment)
+ def findCarbonScanRDD(rdd: RDD[_]): Unit = {
+ rdd match {
+ case d: CarbonScanRDD[_] =>
+ d.setValidateSegmentToAccess(false)
+ case others =>
+ others.dependencies.foreach {x =>
findCarbonScanRDD(x.rdd)}
+ }
+ }
+ findCarbonScanRDD(dataFrame.rdd)
+ // accumulator to collect segment metadata
+ val segmentMetaDataAccumulator = sc.sparkSession.sqlContext
+ .sparkContext
+ .collectionAccumulator[Map[String, SegmentMetaDataInfo]]
+ // TODO: use new insert into flow, instead of DataFrame
prepare RDD[InternalRow]
+ result =
DataLoadProcessBuilderOnSpark.loadDataUsingGlobalSort(
+ sc.sparkSession,
+ Some(dataFrame),
+ carbonLoadModel,
+ hadoopConf = configuration, segmentMetaDataAccumulator)
+ }
+ SegmentFileStore
+ .writeSegmentFile(indexCarbonTable,
+ eachSegment,
+ String.valueOf(carbonLoadModel.getFactTimeStamp))
+ segmentToLoadStartTimeMap
+ .put(eachSegment,
+ String.valueOf(carbonLoadModel.getFactTimeStamp))
Review comment:
Moved. These line breaks were produced by the IDE reformat action itself (Ctrl + Alt + Shift + L), so we either need the formatter configured correctly so it reformats this code properly, or we should not use it.
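As an aside for anyone reading the global-sort path in this hunk: the explode_outer step replaces the complex (array) column in the projection so that each array element becomes its own row before the SI load. Below is a minimal standalone sketch of that behaviour using plain Spark (the ExplodeOuterSketch object and the id/tags columns are hypothetical examples, not part of this PR):

import org.apache.spark.sql.{DataFrame, SparkSession, functions}

object ExplodeOuterSketch {
  // Rebuild the projection, swapping the single array column for explode_outer;
  // explode_outer (unlike explode) keeps rows whose array is null or empty.
  def flatten(df: DataFrame, arrayCol: String): DataFrame = {
    val columns = df.schema.map { field =>
      if (field.name == arrayCol) functions.explode_outer(functions.col(field.name))
      else functions.col(field.name)
    }
    df.select(columns: _*)
  }

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").appName("sketch").getOrCreate()
    import spark.implicits._
    val df = Seq((1, Seq("a", "b")), (2, Seq.empty[String])).toDF("id", "tags")
    flatten(df, "tags").show()
    // id=1 produces two rows ("a" and "b"); id=2 survives as one row with a null tag
    spark.stop()
  }
}

That null-preserving behaviour is why explode_outer is the right choice here: plain explode would drop main-table rows whose complex column is null or empty, and those rows would then be missing from the SI table.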
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]