akashrn5 commented on a change in pull request #3584: [WIP] Support SegmentLevel MinMax for better Pruning and less driver memory usage for cache
URL: https://github.com/apache/carbondata/pull/3584#discussion_r379378744
##########
File path: integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/InsertTaskCompletionListener.scala
##########
@@ -17,19 +17,27 @@
 package org.apache.carbondata.spark.rdd

+import scala.collection.JavaConverters._
+
 import org.apache.spark.TaskContext
 import org.apache.spark.sql.carbondata.execution.datasources.tasklisteners.CarbonLoadTaskCompletionListener
 import org.apache.spark.sql.execution.command.ExecutionErrors
+import org.apache.spark.util.CollectionAccumulator

-import org.apache.carbondata.core.util.{DataTypeUtil, ThreadLocalTaskInfo}
+import org.apache.carbondata.core.util.{DataTypeUtil, SegmentMinMax, SegmentMinMaxStats, ThreadLocalTaskInfo}
 import org.apache.carbondata.processing.loading.{DataLoadExecutor, FailureCauses}
 import org.apache.carbondata.spark.util.CommonUtil

 class InsertTaskCompletionListener(dataLoadExecutor: DataLoadExecutor,
-    executorErrors: ExecutionErrors)
+    executorErrors: ExecutionErrors,
+    accumulator: CollectionAccumulator[Map[String, List[SegmentMinMax]]])
   extends CarbonLoadTaskCompletionListener {
   override def onTaskCompletion(context: TaskContext): Unit = {
     try {
+      // add segment level minMax to accumulator
+      accumulator.add(SegmentMinMaxStats.getInstance().getSegmentMinMaxMap.
+        asScala.mapValues(_.asScala.toList).toMap)
+      SegmentMinMaxStats.getInstance().clear()
Review comment:
This can just be `map.clear()`.
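For context, here is a minimal, self-contained sketch (not from this PR) of the driver-side half of the pattern the diff shows: the driver registers a `CollectionAccumulator`, each task adds its locally gathered map in a completion hook, and the driver reads the merged value after the job. The object name, the demo keys, and the use of `String` as a stand-in for `SegmentMinMax` (which lives in carbondata-core) are all assumptions for illustration:

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.util.CollectionAccumulator

    object SegmentMinMaxAccumulatorSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder()
          .master("local[2]")
          .appName("minmax-sketch")
          .getOrCreate()
        val sc = spark.sparkContext

        // Driver registers the accumulator and hands it to the tasks,
        // mirroring how InsertTaskCompletionListener receives one in the PR.
        // String stands in for SegmentMinMax here (assumption for the demo).
        val acc: CollectionAccumulator[Map[String, List[String]]] =
          sc.collectionAccumulator[Map[String, List[String]]]("segmentMinMax")

        sc.parallelize(Seq("col_a", "col_b"), 2).foreachPartition { cols =>
          // Stand-in for onTaskCompletion: each task contributes the
          // per-column min/max summaries it gathered locally.
          cols.foreach(c => acc.add(Map(c -> List("min=1", "max=9"))))
        }

        // After the job finishes, the driver sees one entry per task add
        // and can merge them into segment-level min/max metadata.
        val perTaskMaps = acc.value // java.util.List[Map[String, List[String]]]
        println(perTaskMaps)

        spark.stop()
      }
    }

The accumulator hand-off avoids a shared driver-side collection: each task's map travels back through Spark's accumulator machinery when the task ends, which is why the listener can safely clear its thread-local stats immediately after adding them.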