jackylk commented on a change in pull request #3535: [WIP] Refactory data 
loading for partition table
URL: https://github.com/apache/carbondata/pull/3535#discussion_r361857672
 
 

 ##########
 File path: 
integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
 ##########
 @@ -206,16 +215,193 @@ with Serializable {
 
 case class CarbonSQLHadoopMapReduceCommitProtocol(jobId: String, path: String, 
isAppend: Boolean)
   extends SQLHadoopMapReduceCommitProtocol(jobId, path, isAppend) {
+
+  override def setupTask(taskContext: TaskAttemptContext): Unit = {
+    if (isCarbonDataFlow(taskContext.getConfiguration)) {
+      
ThreadLocalSessionInfo.setConfigurationToCurrentThread(taskContext.getConfiguration)
+    }
+    super.setupTask(taskContext)
+  }
+
+  override def commitJob(jobContext: JobContext,
+      taskCommits: Seq[TaskCommitMessage]): Unit = {
+    if (isCarbonDataFlow(jobContext.getConfiguration)) {
+      var dataSize = 0L
+      val partitions =
+        taskCommits
+          .flatMap { taskCommit =>
+            taskCommit.obj match {
+              case (map: Map[String, String], _) =>
+                val partition = map.get("carbon.partitions")
+                val size = map.get("carbon.datasize")
+                if (size.isDefined) {
+                  dataSize = dataSize + java.lang.Long.parseLong(size.get)
+                }
+                if (partition.isDefined) {
+                  ObjectSerializationUtil
+                    .convertStringToObject(partition.get)
+                    .asInstanceOf[util.ArrayList[String]]
+                    .asScala
+                } else {
+                  Array.empty[String]
+                }
+              case _ => Array.empty[String]
+            }
+          }
+          .distinct
+          .toList
+          .asJava
+
+      jobContext.getConfiguration.set(
+        "carbon.output.partitions.name",
+        ObjectSerializationUtil.convertObjectToString(partitions))
+      jobContext.getConfiguration.set("carbon.datasize", dataSize.toString)
+
+      val newTaskCommits = taskCommits.map { taskCommit =>
+        taskCommit.obj match {
+          case (map: Map[String, String], set) =>
+            new TaskCommitMessage(
+              map
+                .filterNot(e => "carbon.partitions".equals(e._1) || 
"carbon.datasize".equals(e._1)),
+              set)
+          case _ => taskCommit
+        }
+      }
+      super
+        .commitJob(jobContext, newTaskCommits)
+    } else {
+      super
+        .commitJob(jobContext, taskCommits)
+    }
+  }
+
+  override def commitTask(
+      taskContext: TaskAttemptContext
+  ): FileCommitProtocol.TaskCommitMessage = {
+    var taskMsg = super.commitTask(taskContext)
+    if (isCarbonDataFlow(taskContext.getConfiguration)) {
+      ThreadLocalSessionInfo.unsetAll()
+      val partitions: String = 
taskContext.getConfiguration.get("carbon.output.partitions.name", "")
+      val files = taskContext.getConfiguration.get("carbon.output.files.name", 
"")
+      var sum = 0L
+      var indexSize = 0L
+      if (!StringUtils.isEmpty(files)) {
+        val filesList = ObjectSerializationUtil
+          .convertStringToObject(files)
+          .asInstanceOf[util.ArrayList[String]]
+          .asScala
+        for (file <- filesList) {
+          if (file.contains(".carbondata")) {
+            sum += 
java.lang.Long.parseLong(file.substring(file.lastIndexOf(":") + 1))
+          } else if (file.contains(".carbonindex")) {
+            indexSize += 
java.lang.Long.parseLong(file.substring(file.lastIndexOf(":") + 1))
+          }
+        }
+      }
+      if (!StringUtils.isEmpty(partitions)) {
+        taskMsg = taskMsg.obj match {
+          case (map: Map[String, String], set) =>
+            new TaskCommitMessage(
+              map ++ Map("carbon.partitions" -> partitions, "carbon.datasize" 
-> sum.toString),
+              set)
+          case _ => taskMsg
+        }
+      }
+      // Update outputMetrics with carbondata and index size
+      TaskContext.get().taskMetrics().outputMetrics.setBytesWritten(sum + 
indexSize)
+    }
+    taskMsg
+  }
+
+  override def abortTask(taskContext: TaskAttemptContext): Unit = {
+    super.abortTask(taskContext)
+    if (isCarbonDataFlow(taskContext.getConfiguration)) {
+      val files = taskContext.getConfiguration.get("carbon.output.files.name", 
"")
+      if (!StringUtils.isEmpty(files)) {
+        val filesList = ObjectSerializationUtil
+          .convertStringToObject(files)
+          .asInstanceOf[util.ArrayList[String]]
+          .asScala
+        for (file <- filesList) {
+          val outputFile: String = file.substring(0, file.lastIndexOf(":"))
+          if (outputFile.endsWith(CarbonTablePath.CARBON_DATA_EXT)) {
+            FileFactory
+              .deleteAllCarbonFilesOfDir(FileFactory
+                .getCarbonFile(outputFile,
+                  taskContext.getConfiguration))
+          }
+        }
+      }
+      ThreadLocalSessionInfo.unsetAll()
+    }
+  }
+
   override def newTaskTempFileAbsPath(taskContext: TaskAttemptContext,
       absoluteDir: String,
       ext: String): String = {
-    val carbonFlow = taskContext.getConfiguration.get("carbon.commit.protocol")
-    if (carbonFlow != null) {
+    if (isCarbonFileFlow(taskContext.getConfiguration) ||
+        isCarbonDataFlow(taskContext.getConfiguration)) {
       super.newTaskTempFile(taskContext, Some(absoluteDir), ext)
     } else {
       super.newTaskTempFileAbsPath(taskContext, absoluteDir, ext)
     }
   }
+
+  override def newTaskTempFile(taskContext: TaskAttemptContext,
 
 Review comment:
   Add a description, and move `taskContext` to the next line.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to